bpf_dsl_seccomp_unittest.cc 85 KB

  1. // Copyright 2015 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #include <errno.h>
  5. #include <fcntl.h>
  6. #include <pthread.h>
  7. #include <sched.h>
  8. #include <signal.h>
  9. #include <stddef.h>
  10. #include <stdint.h>
  11. #include <sys/prctl.h>
  12. #include <sys/ptrace.h>
  13. #include <sys/socket.h>
  14. #include <sys/syscall.h>
  15. #include <sys/time.h>
  16. #include <sys/types.h>
  17. #include <sys/utsname.h>
  18. #include <unistd.h>
  19. #if defined(ANDROID)
  20. // Work-around for buggy headers in Android's NDK
  21. #define __user
  22. #endif
  23. #include <linux/futex.h>
  24. #include "base/bind.h"
  25. #include "base/check.h"
  26. #include "base/memory/raw_ptr.h"
  27. #include "base/posix/eintr_wrapper.h"
  28. #include "base/synchronization/waitable_event.h"
  29. #include "base/system/sys_info.h"
  30. #include "base/threading/thread.h"
  31. #include "build/build_config.h"
  32. #include "build/chromeos_buildflags.h"
  33. #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
  34. #include "sandbox/linux/bpf_dsl/errorcode.h"
  35. #include "sandbox/linux/bpf_dsl/linux_syscall_ranges.h"
  36. #include "sandbox/linux/bpf_dsl/policy.h"
  37. #include "sandbox/linux/bpf_dsl/seccomp_macros.h"
  38. #include "sandbox/linux/seccomp-bpf/bpf_tests.h"
  39. #include "sandbox/linux/seccomp-bpf/die.h"
  40. #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
  41. #include "sandbox/linux/seccomp-bpf/syscall.h"
  42. #include "sandbox/linux/seccomp-bpf/trap.h"
  43. #include "sandbox/linux/services/syscall_wrappers.h"
  44. #include "sandbox/linux/services/thread_helpers.h"
  45. #include "sandbox/linux/system_headers/linux_syscalls.h"
  46. #include "sandbox/linux/tests/scoped_temporary_file.h"
  47. #include "sandbox/linux/tests/unit_tests.h"
  48. #include "testing/gtest/include/gtest/gtest.h"
  49. // Workaround for Android's prctl.h file.
  50. #ifndef PR_GET_ENDIAN
  51. #define PR_GET_ENDIAN 19
  52. #endif
  53. #ifndef PR_CAPBSET_READ
  54. #define PR_CAPBSET_READ 23
  55. #define PR_CAPBSET_DROP 24
  56. #endif
  57. #define CASES SANDBOX_BPF_DSL_CASES
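// CASES lets one Switch() case match several values at once; it is used
// further down, e.g. .CASES((PR_SET_DUMPABLE, PR_GET_DUMPABLE), Allow()).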
  58. namespace sandbox {
  59. namespace bpf_dsl {
  60. namespace {
  61. const int kExpectedReturnValue = 42;
  62. const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
  63. // Set the global environment to allow the use of UnsafeTrap() policies.
  64. void EnableUnsafeTraps() {
  65. // The use of UnsafeTrap() causes us to print a warning message. This is
  66. // generally desirable, but it results in the unittest failing, as it doesn't
  67. // expect any messages on "stderr". So, temporarily disable messages. The
  68. // BPF_TEST() is guaranteed to turn messages back on, after the policy
  69. // function has completed.
  70. setenv(kSandboxDebuggingEnv, "t", 0);
  71. Die::SuppressInfoMessages(true);
  72. }
  73. // BPF_TEST takes care of a lot of the boiler-plate code around setting up a
  74. // policy and optionally passing data between the caller, the policy and
  75. // any Trap() handlers. This is great for writing short and concise tests,
  76. // and it keeps us from accidentally forgetting any of the crucial steps in
  77. // setting up the sandbox. But it wouldn't hurt to have at least one test
  78. // that explicitly walks through all these steps.
  79. intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
  80. BPF_ASSERT(aux);
  81. int* counter = static_cast<int*>(aux);
  82. return (*counter)++;
  83. }
  84. class VerboseAPITestingPolicy : public Policy {
  85. public:
  86. explicit VerboseAPITestingPolicy(int* counter_ptr)
  87. : counter_ptr_(counter_ptr) {}
  88. VerboseAPITestingPolicy(const VerboseAPITestingPolicy&) = delete;
  89. VerboseAPITestingPolicy& operator=(const VerboseAPITestingPolicy&) = delete;
  90. ~VerboseAPITestingPolicy() override {}
  91. ResultExpr EvaluateSyscall(int sysno) const override {
  92. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  93. if (sysno == __NR_uname) {
  94. return Trap(IncreaseCounter, counter_ptr_);
  95. }
  96. return Allow();
  97. }
  98. private:
  99. raw_ptr<int> counter_ptr_;
  100. };
  101. SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
  102. if (SandboxBPF::SupportsSeccompSandbox(
  103. SandboxBPF::SeccompLevel::SINGLE_THREADED)) {
  104. static int counter = 0;
  105. SandboxBPF sandbox(std::make_unique<VerboseAPITestingPolicy>(&counter));
  106. BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));
  107. BPF_ASSERT_EQ(0, counter);
  108. BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
  109. BPF_ASSERT_EQ(1, counter);
  110. BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
  111. BPF_ASSERT_EQ(2, counter);
  112. }
  113. }
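// For comparison, the BPF_TEST_C() macro used by most tests below performs
// the same setup and teardown implicitly. A minimal sketch, with a
// hypothetical test and policy name:
//
//   BPF_TEST_C(SandboxBPF, SomeTest, SomeDefaultConstructiblePolicy) {
//     BPF_ASSERT(sys_getpid() > 0);
//   }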
  114. // A simple denylist test
  115. class DenylistNanosleepPolicy : public Policy {
  116. public:
  117. DenylistNanosleepPolicy() {}
  118. DenylistNanosleepPolicy(const DenylistNanosleepPolicy&) = delete;
  119. DenylistNanosleepPolicy& operator=(const DenylistNanosleepPolicy&) = delete;
  120. ~DenylistNanosleepPolicy() override {}
  121. ResultExpr EvaluateSyscall(int sysno) const override {
  122. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  123. switch (sysno) {
  124. case __NR_nanosleep:
  125. return Error(EACCES);
  126. default:
  127. return Allow();
  128. }
  129. }
  130. static void AssertNanosleepFails() {
  131. const struct timespec ts = {0, 0};
  132. errno = 0;
  133. BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
  134. BPF_ASSERT_EQ(EACCES, errno);
  135. }
  136. };
  137. BPF_TEST_C(SandboxBPF, ApplyBasicDenylistPolicy, DenylistNanosleepPolicy) {
  138. DenylistNanosleepPolicy::AssertNanosleepFails();
  139. }
  140. BPF_TEST_C(SandboxBPF, UseVsyscall, DenylistNanosleepPolicy) {
  141. time_t current_time;
  142. // time() is implemented as a vsyscall. With an older glibc, with
  143. // vsyscall=emulate and some versions of the seccomp BPF patch
  144. // we may get SIGKILL-ed. Detect this!
  145. BPF_ASSERT_NE(static_cast<time_t>(-1), time(&current_time));
  146. }
  147. bool IsSyscallForTestHarness(int sysno) {
  148. if (sysno == __NR_exit_group || sysno == __NR_write) {
  149. // exit_group is special and we really need it to work.
  150. // write() is needed for BPF_ASSERT() to report a useful error message.
  151. return true;
  152. }
  153. #if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
  154. defined(UNDEFINED_SANITIZER)
  155. // UBSan_vptr checker needs mmap, munmap, pipe, write.
  156. // ASan and MSan don't need any of these for normal operation, but they
  157. // require at least mmap & munmap to print a report if an error is detected.
  158. // ASan requires sigaltstack.
  159. if (sysno == kMMapNr || sysno == __NR_munmap ||
  160. #if !defined(__aarch64__)
  161. sysno == __NR_pipe ||
  162. #else
  163. sysno == __NR_pipe2 ||
  164. #endif
  165. sysno == __NR_sigaltstack) {
  166. return true;
  167. }
  168. #endif
  169. return false;
  170. }
  171. // Now do a simple allowlist test
  172. class AllowlistGetpidPolicy : public Policy {
  173. public:
  174. AllowlistGetpidPolicy() {}
  175. AllowlistGetpidPolicy(const AllowlistGetpidPolicy&) = delete;
  176. AllowlistGetpidPolicy& operator=(const AllowlistGetpidPolicy&) = delete;
  177. ~AllowlistGetpidPolicy() override {}
  178. ResultExpr EvaluateSyscall(int sysno) const override {
  179. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  180. if (IsSyscallForTestHarness(sysno) || sysno == __NR_getpid) {
  181. return Allow();
  182. }
  183. return Error(ENOMEM);
  184. }
  185. };
  186. BPF_TEST_C(SandboxBPF, ApplyBasicAllowlistPolicy, AllowlistGetpidPolicy) {
  187. // getpid() should be allowed
  188. errno = 0;
  189. BPF_ASSERT(sys_getpid() > 0);
  190. BPF_ASSERT(errno == 0);
  191. // getpgid() should be denied
  192. BPF_ASSERT(getpgid(0) == -1);
  193. BPF_ASSERT(errno == ENOMEM);
  194. }
  195. // A simple denylist policy, with a SIGSYS handler
  196. intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
  197. // We also check that the auxiliary data is correct
  198. SANDBOX_ASSERT(aux);
  199. *(static_cast<int*>(aux)) = kExpectedReturnValue;
  200. return -ENOMEM;
  201. }
  202. class DenylistNanosleepTrapPolicy : public Policy {
  203. public:
  204. explicit DenylistNanosleepTrapPolicy(int* aux) : aux_(aux) {}
  205. DenylistNanosleepTrapPolicy(const DenylistNanosleepTrapPolicy&) = delete;
  206. DenylistNanosleepTrapPolicy& operator=(const DenylistNanosleepTrapPolicy&) =
  207. delete;
  208. ~DenylistNanosleepTrapPolicy() override {}
  209. ResultExpr EvaluateSyscall(int sysno) const override {
  210. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  211. switch (sysno) {
  212. case __NR_nanosleep:
  213. return Trap(EnomemHandler, aux_);
  214. default:
  215. return Allow();
  216. }
  217. }
  218. private:
  219. raw_ptr<int> aux_;
  220. };
  221. BPF_TEST(SandboxBPF,
  222. BasicDenylistWithSigsys,
  223. DenylistNanosleepTrapPolicy,
  224. int /* (*BPF_AUX) */) {
  225. // getpid() should work properly
  226. errno = 0;
  227. BPF_ASSERT(sys_getpid() > 0);
  228. BPF_ASSERT(errno == 0);
  229. // Our auxiliary data should be reset by the signal handler.
  230. *BPF_AUX = -1;
  231. const struct timespec ts = {0, 0};
  232. BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
  233. BPF_ASSERT(errno == ENOMEM);
  234. // We expect the signal handler to modify AuxData
  235. BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
  236. }
  237. // A simple test that verifies we can return arbitrary errno values.
  238. class ErrnoTestPolicy : public Policy {
  239. public:
  240. ErrnoTestPolicy() {}
  241. ErrnoTestPolicy(const ErrnoTestPolicy&) = delete;
  242. ErrnoTestPolicy& operator=(const ErrnoTestPolicy&) = delete;
  243. ~ErrnoTestPolicy() override {}
  244. ResultExpr EvaluateSyscall(int sysno) const override;
  245. };
  246. ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
  247. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  248. switch (sysno) {
  249. case __NR_dup3: // dup2 is a wrapper of dup3 in android
  250. #if defined(__NR_dup2)
  251. case __NR_dup2:
  252. #endif
  253. // Pretend that dup2() worked, but don't actually do anything.
  254. return Error(0);
  255. case __NR_setuid:
  256. #if defined(__NR_setuid32)
  257. case __NR_setuid32:
  258. #endif
  259. // Return errno = 1.
  260. return Error(1);
  261. case __NR_setgid:
  262. #if defined(__NR_setgid32)
  263. case __NR_setgid32:
  264. #endif
  265. // Return maximum errno value (typically 4095).
  266. return Error(ErrorCode::ERR_MAX_ERRNO);
  267. case __NR_uname:
  268. // Return errno = 42;
  269. return Error(42);
  270. default:
  271. return Allow();
  272. }
  273. }
  274. BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
  275. // Verify that dup2() returns success, but doesn't actually run.
  276. int fds[4];
  277. BPF_ASSERT(pipe(fds) == 0);
  278. BPF_ASSERT(pipe(fds + 2) == 0);
  279. BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
  280. char buf[1] = {};
  281. BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
  282. BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
  283. BPF_ASSERT(read(fds[0], buf, 1) == 1);
  284. // If dup2() executed, we will read \xAA; but if dup2() has been turned
  285. // into a no-op by our policy, then we will read \x55.
  286. BPF_ASSERT(buf[0] == '\x55');
  287. // Verify that we can return the minimum and maximum errno values.
  288. errno = 0;
  289. BPF_ASSERT(setuid(0) == -1);
  290. BPF_ASSERT(errno == 1);
  291. // On Android, errno values are only supported up to 255; for anything
  292. // larger, errno processing is skipped.
  293. // We work around this (crbug.com/181647).
  294. if (sandbox::IsAndroid() && setgid(0) != -1) {
  295. errno = 0;
  296. BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
  297. BPF_ASSERT(errno == 0);
  298. } else {
  299. errno = 0;
  300. BPF_ASSERT(setgid(0) == -1);
  301. BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
  302. }
  303. // Finally, test an errno in between the minimum and maximum.
  304. errno = 0;
  305. struct utsname uts_buf;
  306. BPF_ASSERT(uname(&uts_buf) == -1);
  307. BPF_ASSERT(errno == 42);
  308. }
  309. // Testing the stacking of two sandboxes
  310. class StackingPolicyPartOne : public Policy {
  311. public:
  312. StackingPolicyPartOne() {}
  313. StackingPolicyPartOne(const StackingPolicyPartOne&) = delete;
  314. StackingPolicyPartOne& operator=(const StackingPolicyPartOne&) = delete;
  315. ~StackingPolicyPartOne() override {}
  316. ResultExpr EvaluateSyscall(int sysno) const override {
  317. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  318. switch (sysno) {
  319. case __NR_getppid: {
  320. const Arg<int> arg(0);
  321. return If(arg == 0, Allow()).Else(Error(EPERM));
  322. }
  323. default:
  324. return Allow();
  325. }
  326. }
  327. };
  328. class StackingPolicyPartTwo : public Policy {
  329. public:
  330. StackingPolicyPartTwo() {}
  331. StackingPolicyPartTwo(const StackingPolicyPartTwo&) = delete;
  332. StackingPolicyPartTwo& operator=(const StackingPolicyPartTwo&) = delete;
  333. ~StackingPolicyPartTwo() override {}
  334. ResultExpr EvaluateSyscall(int sysno) const override {
  335. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  336. switch (sysno) {
  337. case __NR_getppid: {
  338. const Arg<int> arg(0);
  339. return If(arg == 0, Error(EINVAL)).Else(Allow());
  340. }
  341. default:
  342. return Allow();
  343. }
  344. }
  345. };
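// When both filters are installed, the kernel evaluates every attached
// seccomp filter for each system call and applies the most restrictive
// result. That is why, in the death test below, getppid(1) still fails with
// EPERM from part one even though part two would Allow() it.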
  346. // Depending on whether DCHECKs are enabled, the test may create some output.
  347. // Therefore, explicitly specify the death test to allow some noise.
  348. BPF_DEATH_TEST_C(SandboxBPF,
  349. StackingPolicy,
  350. DEATH_SUCCESS_ALLOW_NOISE(),
  351. StackingPolicyPartOne) {
  352. errno = 0;
  353. BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
  354. BPF_ASSERT(errno == 0);
  355. BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  356. BPF_ASSERT(errno == EPERM);
  357. // Stack a second sandbox with its own policy. Verify that we can further
  358. // restrict filters, but we cannot relax existing filters.
  359. SandboxBPF sandbox(std::make_unique<StackingPolicyPartTwo>());
  360. BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));
  361. errno = 0;
  362. BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
  363. BPF_ASSERT(errno == EINVAL);
  364. BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  365. BPF_ASSERT(errno == EPERM);
  366. }
  367. // A more complex, but synthetic policy. This tests the correctness of the BPF
  368. // program by iterating through all syscalls and checking for an errno that
  369. // depends on the syscall number. Unlike the Verifier, this exercises the BPF
  370. // interpreter in the kernel.
  371. // We try to make sure we exercise optimizations in the BPF compiler. We make
  372. // sure that the compiler can have an opportunity to coalesce syscalls with
  373. // contiguous numbers and we also make sure that disjoint sets can return the
  374. // same errno.
  375. int SysnoToRandomErrno(int sysno) {
  376. // Small contiguous sets of 4 system calls return an errno equal to the
  377. // index of that set + 1 (so that we never return a NUL errno).
  378. return ((sysno & ~3) >> 2) % 29 + 1;
  379. }
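// For illustration: sysno 0..3 all map to errno 1, sysno 4..7 to errno 2, and
// the mapping wraps around after 29 sets, so sysno 116..119 map back to 1.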
  380. class SyntheticPolicy : public Policy {
  381. public:
  382. SyntheticPolicy() {}
  383. SyntheticPolicy(const SyntheticPolicy&) = delete;
  384. SyntheticPolicy& operator=(const SyntheticPolicy&) = delete;
  385. ~SyntheticPolicy() override {}
  386. ResultExpr EvaluateSyscall(int sysno) const override {
  387. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  388. if (IsSyscallForTestHarness(sysno)) {
  389. return Allow();
  390. }
  391. return Error(SysnoToRandomErrno(sysno));
  392. }
  393. };
  394. BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
  395. // Ensure that kExpectedReturnValue + syscall number + 1 does not overflow
  396. // an int.
  397. BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
  398. static_cast<int>(MAX_PUBLIC_SYSCALL));
  399. for (int syscall_number = static_cast<int>(MIN_SYSCALL);
  400. syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
  401. ++syscall_number) {
  402. if (IsSyscallForTestHarness(syscall_number)) {
  403. continue;
  404. }
  405. errno = 0;
  406. BPF_ASSERT(syscall(syscall_number) == -1);
  407. BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
  408. }
  409. }
  410. #if defined(__arm__)
  411. // A simple policy that tests whether ARM private system calls are supported
  412. // by our BPF compiler and by the BPF interpreter in the kernel.
  413. // For ARM private system calls, return an errno equal to their offset from
  414. // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
  415. int ArmPrivateSysnoToErrno(int sysno) {
  416. if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
  417. sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
  418. return (sysno - MIN_PRIVATE_SYSCALL) + 1;
  419. } else {
  420. return ENOSYS;
  421. }
  422. }
  423. class ArmPrivatePolicy : public Policy {
  424. public:
  425. ArmPrivatePolicy() {}
  426. ArmPrivatePolicy(const ArmPrivatePolicy&) = delete;
  427. ArmPrivatePolicy& operator=(const ArmPrivatePolicy&) = delete;
  428. ~ArmPrivatePolicy() override {}
  429. ResultExpr EvaluateSyscall(int sysno) const override {
  430. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  431. // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
  432. // ARM private system calls.
  433. if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
  434. sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
  435. return Error(ArmPrivateSysnoToErrno(sysno));
  436. }
  437. return Allow();
  438. }
  439. };
  440. BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
  441. for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
  442. syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
  443. ++syscall_number) {
  444. errno = 0;
  445. BPF_ASSERT(syscall(syscall_number) == -1);
  446. BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
  447. }
  448. }
  449. #endif // defined(__arm__)
  450. intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
  451. // Count all invocations of our callback function.
  452. ++*reinterpret_cast<int*>(aux);
  453. // Verify that within the callback function all filtering is temporarily
  454. // disabled.
  455. BPF_ASSERT(sys_getpid() > 1);
  456. // Verify that we can now call the underlying system call without causing
  457. // infinite recursion.
  458. return SandboxBPF::ForwardSyscall(args);
  459. }
  460. class GreyListedPolicy : public Policy {
  461. public:
  462. explicit GreyListedPolicy(int* aux) : aux_(aux) {
  463. // Set the global environment for unsafe traps once.
  464. EnableUnsafeTraps();
  465. }
  466. GreyListedPolicy(const GreyListedPolicy&) = delete;
  467. GreyListedPolicy& operator=(const GreyListedPolicy&) = delete;
  468. ~GreyListedPolicy() override {}
  469. ResultExpr EvaluateSyscall(int sysno) const override {
  470. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  471. // Some system calls must always be allowed, if our policy wants to make
  472. // use of UnsafeTrap()
  473. if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
  474. return Allow();
  475. } else if (sysno == __NR_getpid) {
  476. // Disallow getpid()
  477. return Error(EPERM);
  478. } else {
  479. // Allow (and count) all other system calls.
  480. return UnsafeTrap(CountSyscalls, aux_);
  481. }
  482. }
  483. private:
  484. raw_ptr<int> aux_;
  485. };
  486. BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
  487. BPF_ASSERT(sys_getpid() == -1);
  488. BPF_ASSERT(errno == EPERM);
  489. BPF_ASSERT(*BPF_AUX == 0);
  490. BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
  491. BPF_ASSERT(*BPF_AUX == 2);
  492. char name[17] = {};
  493. BPF_ASSERT(!syscall(__NR_prctl,
  494. PR_GET_NAME,
  495. name,
  496. (void*)NULL,
  497. (void*)NULL,
  498. (void*)NULL));
  499. BPF_ASSERT(*BPF_AUX == 3);
  500. BPF_ASSERT(*name);
  501. }
  502. SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
  503. // Disable warning messages that could confuse our test framework.
  504. setenv(kSandboxDebuggingEnv, "t", 0);
  505. Die::SuppressInfoMessages(true);
  506. unsetenv(kSandboxDebuggingEnv);
  507. SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == false);
  508. setenv(kSandboxDebuggingEnv, "", 1);
  509. SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == false);
  510. setenv(kSandboxDebuggingEnv, "t", 1);
  511. SANDBOX_ASSERT(Trap::Registry()->EnableUnsafeTraps() == true);
  512. }
  513. intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
  514. if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
  515. // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
  516. // return an error. But our handler allows this call.
  517. return 0;
  518. } else {
  519. return SandboxBPF::ForwardSyscall(args);
  520. }
  521. }
  522. class PrctlPolicy : public Policy {
  523. public:
  524. PrctlPolicy() {}
  525. PrctlPolicy(const PrctlPolicy&) = delete;
  526. PrctlPolicy& operator=(const PrctlPolicy&) = delete;
  527. ~PrctlPolicy() override {}
  528. ResultExpr EvaluateSyscall(int sysno) const override {
  529. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  530. setenv(kSandboxDebuggingEnv, "t", 0);
  531. Die::SuppressInfoMessages(true);
  532. if (sysno == __NR_prctl) {
  533. // Handle prctl() inside an UnsafeTrap()
  534. return UnsafeTrap(PrctlHandler, nullptr);
  535. }
  536. // Allow all other system calls.
  537. return Allow();
  538. }
  539. };
  540. BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
  541. // This call should never be allowed. But our policy will intercept it and
  542. // let it pass successfully.
  543. BPF_ASSERT(
  544. !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
  545. // Verify that the call will fail, if it makes it all the way to the kernel.
  546. BPF_ASSERT(
  547. prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
  548. // And verify that other uses of prctl() work just fine.
  549. char name[17] = {};
  550. BPF_ASSERT(!syscall(__NR_prctl,
  551. PR_GET_NAME,
  552. name,
  553. (void*)NULL,
  554. (void*)NULL,
  555. (void*)NULL));
  556. BPF_ASSERT(*name);
  557. // Finally, verify that system calls other than prctl() are completely
  558. // unaffected by our policy.
  559. struct utsname uts = {};
  560. BPF_ASSERT(!uname(&uts));
  561. BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
  562. }
  563. intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
  564. return SandboxBPF::ForwardSyscall(args);
  565. }
  566. class RedirectAllSyscallsPolicy : public Policy {
  567. public:
  568. RedirectAllSyscallsPolicy() {}
  569. RedirectAllSyscallsPolicy(const RedirectAllSyscallsPolicy&) = delete;
  570. RedirectAllSyscallsPolicy& operator=(const RedirectAllSyscallsPolicy&) =
  571. delete;
  572. ~RedirectAllSyscallsPolicy() override {}
  573. ResultExpr EvaluateSyscall(int sysno) const override;
  574. };
  575. ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const {
  576. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  577. setenv(kSandboxDebuggingEnv, "t", 0);
  578. Die::SuppressInfoMessages(true);
  579. // Some system calls must always be allowed, if our policy wants to make
  580. // use of UnsafeTrap()
  581. if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
  582. return Allow();
  583. return UnsafeTrap(AllowRedirectedSyscall, nullptr);
  584. }
  585. #if !defined(ADDRESS_SANITIZER)
  586. // ASan does not allow changing the signal handler for SIGBUS, and treats it as
  587. // a fatal signal.
  588. int bus_handler_fd_ = -1;
  589. void SigBusHandler(int, siginfo_t* info, void* void_context) {
  590. BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
  591. }
  592. BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
  593. // We use the SIGBUS bit in the signal mask as a thread-local boolean
  594. // value in the implementation of UnsafeTrap(). This is obviously a bit
  595. // of a hack that could conceivably interfere with code that uses SIGBUS
  596. // in more traditional ways. This test verifies that basic functionality
  597. // of SIGBUS is not impacted, but it is certainly possible to construct
  598. // more complex uses of signals where our use of the SIGBUS mask is not
  599. // 100% transparent. This is expected behavior.
  600. int fds[2];
  601. BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  602. bus_handler_fd_ = fds[1];
  603. struct sigaction sa = {};
  604. sa.sa_sigaction = SigBusHandler;
  605. sa.sa_flags = SA_SIGINFO;
  606. BPF_ASSERT(sigaction(SIGBUS, &sa, nullptr) == 0);
  607. kill(getpid(), SIGBUS);
  608. char c = '\000';
  609. BPF_ASSERT(read(fds[0], &c, 1) == 1);
  610. BPF_ASSERT(close(fds[0]) == 0);
  611. BPF_ASSERT(close(fds[1]) == 0);
  612. BPF_ASSERT(c == 0x55);
  613. }
  614. #endif // !defined(ADDRESS_SANITIZER)
  615. BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
  616. // Signal masks are potentially tricky to handle. For instance, if we
  617. // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
  618. // the call to sigreturn() at the end of the signal handler would undo
  619. // all of our efforts. So, it makes sense to test that sigprocmask()
  620. // works, even if we have a policy in place that makes use of UnsafeTrap().
  621. // In practice, this works because we force sigprocmask() to be handled
  622. // entirely in the kernel.
  623. sigset_t mask0, mask1, mask2;
  624. // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
  625. // change the mask (it shouldn't have been, as it isn't blocked by default
  626. // in POSIX).
  627. //
  628. // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
  629. sigemptyset(&mask0);
  630. BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
  631. BPF_ASSERT(!sigismember(&mask1, SIGUSR2));
  632. // Try again, and this time we verify that we can block it. This
  633. // requires a second call to sigprocmask().
  634. sigaddset(&mask0, SIGUSR2);
  635. BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, nullptr));
  636. BPF_ASSERT(!sigprocmask(SIG_BLOCK, nullptr, &mask2));
  637. BPF_ASSERT(sigismember(&mask2, SIGUSR2));
  638. }
  639. BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
  640. // An UnsafeTrap() (or for that matter, a Trap()) has to report error
  641. // conditions by returning an exit code in the range -1..-4096. This
  642. // should happen automatically if using ForwardSyscall(). If the TrapFnc()
  643. // uses some other method to make system calls, then it is responsible
  644. // for computing the correct return code.
  645. // This test verifies that ForwardSyscall() does the correct thing.
  646. // The glibc system wrapper will ultimately set errno for us. So, from normal
  647. // userspace, all of this should be completely transparent.
  648. errno = 0;
  649. BPF_ASSERT(close(-1) == -1);
  650. BPF_ASSERT(errno == EBADF);
  651. // Explicitly avoid the glibc wrapper. This is not normally the way anybody
  652. // would make system calls, but it allows us to verify that we don't
  653. // accidentally mess with errno, when we shouldn't.
  654. errno = 0;
  655. struct arch_seccomp_data args = {};
  656. args.nr = __NR_close;
  657. args.args[0] = -1;
  658. BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
  659. BPF_ASSERT(errno == 0);
  660. }
  661. // Simple test demonstrating how to use SandboxBPF::Cond()
  662. class SimpleCondTestPolicy : public Policy {
  663. public:
  664. SimpleCondTestPolicy() {}
  665. SimpleCondTestPolicy(const SimpleCondTestPolicy&) = delete;
  666. SimpleCondTestPolicy& operator=(const SimpleCondTestPolicy&) = delete;
  667. ~SimpleCondTestPolicy() override {}
  668. ResultExpr EvaluateSyscall(int sysno) const override;
  669. };
  670. ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const {
  671. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  672. // We deliberately return unusual errno values upon failure, so that we
  673. // can uniquely test for these values. In a "real" policy, you would want
  674. // to return more traditional values.
  675. int flags_argument_position = -1;
  676. switch (sysno) {
  677. #if defined(__NR_open)
  678. case __NR_open:
  679. flags_argument_position = 1;
  680. [[fallthrough]];
  681. #endif
  682. case __NR_openat: { // open can be a wrapper for openat(2).
  683. if (sysno == __NR_openat)
  684. flags_argument_position = 2;
  685. // Allow opening files for reading, but don't allow writing.
  686. static_assert(O_RDONLY == 0, "O_RDONLY must be all zero bits");
  687. const Arg<int> flags(flags_argument_position);
  688. return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow());
  689. }
  690. case __NR_prctl: {
  691. // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
  692. // disallow everything else.
  693. const Arg<int> option(0);
  694. return Switch(option)
  695. .CASES((PR_SET_DUMPABLE, PR_GET_DUMPABLE), Allow())
  696. .Default(Error(ENOMEM));
  697. }
  698. default:
  699. return Allow();
  700. }
  701. }
  702. BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
  703. int fd;
  704. BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
  705. BPF_ASSERT(errno == EROFS);
  706. BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
  707. close(fd);
  708. int ret;
  709. BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
  710. BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
  711. BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
  712. BPF_ASSERT(errno == ENOMEM);
  713. }
  714. // This test exercises the SandboxBPF::Cond() method by building a complex
  715. // tree of conditional equality operations. It then makes system calls and
  716. // verifies that they return the values that we expected from our BPF
  717. // program.
  718. class EqualityStressTest {
  719. public:
  720. EqualityStressTest() {
  721. // We want a deterministic test
  722. srand(0);
  723. // Iterates over system call numbers and builds a random tree of
  724. // equality tests.
  725. // We are actually constructing a graph of ArgValue objects. This
  726. // graph will later be used to a) compute our sandbox policy, and
  727. // b) drive the code that verifies the output from the BPF program.
  728. static_assert(
  729. kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
  730. "kNumTestCases must be significantly smaller than the number "
  731. "of system calls");
  732. for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
  733. if (IsReservedSyscall(sysno)) {
  734. // Skip reserved system calls. This ensures that our test framework
  735. // isn't impacted by the fact that we are overriding
  736. // a lot of different system calls.
  737. ++end;
  738. arg_values_.push_back(nullptr);
  739. } else {
  740. arg_values_.push_back(
  741. RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
  742. }
  743. }
  744. }
  745. ~EqualityStressTest() {
  746. for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
  747. iter != arg_values_.end();
  748. ++iter) {
  749. DeleteArgValue(*iter);
  750. }
  751. }
  752. ResultExpr Policy(int sysno) {
  753. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  754. if (sysno < 0 || sysno >= (int)arg_values_.size() ||
  755. IsReservedSyscall(sysno)) {
  756. // We only return ErrorCode values for the system calls that
  757. // are part of our test data. Every other system call remains
  758. // allowed.
  759. return Allow();
  760. } else {
  761. // ToErrorCode() turns an ArgValue object into an ErrorCode that is
  762. // suitable for use by a sandbox policy.
  763. return ToErrorCode(arg_values_[sysno]);
  764. }
  765. }
  766. void VerifyFilter() {
  767. // Iterate over all system calls. Skip the system calls that have
  768. // previously been determined as being reserved.
  769. for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
  770. if (!arg_values_[sysno]) {
  771. // Skip reserved system calls.
  772. continue;
  773. }
  774. // Verify that system calls return the values that we expect them to
  775. // return. This involves passing different combinations of system call
  776. // parameters in order to exercise all possible code paths through the
  777. // BPF filter program.
  778. // We arbitrarily start by setting all six system call arguments to
  779. // zero. And we then recursively traverse our tree of ArgValues to
  780. // determine the necessary combinations of parameters.
  781. intptr_t args[6] = {};
  782. Verify(sysno, args, *arg_values_[sysno]);
  783. }
  784. }
  785. private:
  786. struct ArgValue {
  787. int argno; // Argument number to inspect.
  788. int size; // Number of test cases (must be > 0).
  789. struct Tests {
  790. uint32_t k_value; // Value to compare syscall arg against.
  791. int err; // If non-zero, errno value to return.
  792. raw_ptr<struct ArgValue>
  793. arg_value; // Otherwise, more args needs inspecting.
  794. }* tests;
  795. int err; // If none of the tests passed, this is what
  796. raw_ptr<struct ArgValue>
  797. arg_value; // we'll return (this is the "else" branch).
  798. };
  799. bool IsReservedSyscall(int sysno) {
  800. // There are a handful of system calls that we should never use in our
  801. // test cases. These system calls are needed to allow the test framework
  802. // to run properly.
  803. // If we wanted to write fully generic code, there are more system calls
  804. // that could be listed here, and it is quite difficult to come up with a
  805. // truly comprehensive list. After all, we are deliberately making system
  806. // calls unavailable. In practice, we have a pretty good idea of the system
  807. // calls that will be made by this particular test. So, this small list is
  808. // sufficient. But if anybody copy'n'pasted this code for other uses, they
  810. // would have to review the list.
  810. return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
  811. sysno == __NR_exit_group || sysno == __NR_restart_syscall;
  812. }
  813. ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
  814. // Create a new ArgValue and fill it with random data. We use a bit mask
  815. // to keep track of the system call parameters that have previously been
  816. // set; this ensures that we won't accidentally define a contradictory
  817. // set of equality tests.
  818. struct ArgValue* arg_value = new ArgValue();
  819. args_mask |= 1 << argno;
  820. arg_value->argno = argno;
  821. // Apply some restrictions on just how complex our tests can be.
  822. // Otherwise, we end up with a BPF program that is too complicated for
  823. // the kernel to load.
  824. int fan_out = kMaxFanOut;
  825. if (remaining_args > 3) {
  826. fan_out = 1;
  827. } else if (remaining_args > 2) {
  828. fan_out = 2;
  829. }
  830. // Create a couple of different test cases with randomized values that
  831. // we want to use when comparing system call parameter number "argno".
  832. arg_value->size = rand() % fan_out + 1;
  833. arg_value->tests = new ArgValue::Tests[arg_value->size];
  834. uint32_t k_value = rand();
  835. for (int n = 0; n < arg_value->size; ++n) {
  836. // Ensure that we have unique values
  837. k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
  838. // There are two possible types of nodes. Either this is a leaf node;
  839. // in that case, we have completed all the equality tests that we
  840. // wanted to perform, and we can now compute a random "errno" value that
  841. // we should return. Or this is part of a more complex boolean
  842. // expression; in that case, we have to recursively add tests for some
  843. // of the system call parameters that we have not yet included in our
  844. // tests.
  845. arg_value->tests[n].k_value = k_value;
  846. if (!remaining_args || (rand() & 1)) {
  847. arg_value->tests[n].err = (rand() % 1000) + 1;
  848. arg_value->tests[n].arg_value = nullptr;
  849. } else {
  850. arg_value->tests[n].err = 0;
  851. arg_value->tests[n].arg_value =
  852. RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
  853. }
  854. }
  855. // Finally, we have to define what we should return if none of the
  856. // previous equality tests pass. Again, we can either deal with a leaf
  857. // node, or we can randomly add another couple of tests.
  858. if (!remaining_args || (rand() & 1)) {
  859. arg_value->err = (rand() % 1000) + 1;
  860. arg_value->arg_value = nullptr;
  861. } else {
  862. arg_value->err = 0;
  863. arg_value->arg_value =
  864. RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
  865. }
  866. // We have now built a new (sub-)tree of ArgValues defining a set of
  867. // boolean expressions for testing random system call arguments against
  868. // random values. Return this tree to our caller.
  869. return arg_value;
  870. }
  871. int RandomArg(int args_mask) {
  872. // Compute a random system call parameter number.
  873. int argno = rand() % kMaxArgs;
  874. // Make sure that this same parameter number has not previously been
  875. // used. Otherwise, we could end up with a test that is impossible to
  876. // satisfy (e.g. args[0] == 1 && args[0] == 2).
  877. while (args_mask & (1 << argno)) {
  878. argno = (argno + 1) % kMaxArgs;
  879. }
  880. return argno;
  881. }
  882. void DeleteArgValue(ArgValue* arg_value) {
  883. // Delete an ArgValue and all of its child nodes. This requires
  884. // recursively descending into the tree.
  885. if (arg_value) {
  886. if (arg_value->size) {
  887. for (int n = 0; n < arg_value->size; ++n) {
  888. if (!arg_value->tests[n].err) {
  889. DeleteArgValue(arg_value->tests[n].arg_value);
  890. }
  891. }
  892. delete[] arg_value->tests;
  893. }
  894. if (!arg_value->err) {
  895. DeleteArgValue(arg_value->arg_value);
  896. }
  897. delete arg_value;
  898. }
  899. }
  900. ResultExpr ToErrorCode(ArgValue* arg_value) {
  901. // Compute the ResultExpr that should be returned, if none of our
  902. // tests succeed (i.e. the system call parameter doesn't match any
  903. // of the values in arg_value->tests[].k_value).
  904. ResultExpr err;
  905. if (arg_value->err) {
  906. // If this was a leaf node, return the errno value that we expect to
  907. // return from the BPF filter program.
  908. err = Error(arg_value->err);
  909. } else {
  910. // If this wasn't a leaf node yet, recursively descend into the rest
  911. // of the tree. This will end up adding a few more SandboxBPF::Cond()
  912. // tests to our ErrorCode.
  913. err = ToErrorCode(arg_value->arg_value);
  914. }
  915. // Now, iterate over all the test cases that we want to compare against.
  916. // This builds a chain of SandboxBPF::Cond() tests
  917. // (aka "if ... elif ... elif ... elif ... fi")
  918. for (int n = arg_value->size; n-- > 0;) {
  919. ResultExpr matched;
  920. // Again, we distinguish between leaf nodes and subtrees.
  921. if (arg_value->tests[n].err) {
  922. matched = Error(arg_value->tests[n].err);
  923. } else {
  924. matched = ToErrorCode(arg_value->tests[n].arg_value);
  925. }
  926. // For now, all of our tests are limited to 32bit.
  927. // We have separate tests that check the behavior of 32bit vs. 64bit
  928. // conditional expressions.
  929. const Arg<uint32_t> arg(arg_value->argno);
  930. err = If(arg == arg_value->tests[n].k_value, matched).Else(err);
  931. }
  932. return err;
  933. }
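// As a sketch of what this builds (invented constants): for a node inspecting
// argument 0 with two leaf tests and a leaf "else" branch, the loop folds the
// tests, last to first, into a nested chain equivalent to
//   If(arg0 == k0, Error(e0))
//       .Else(If(arg0 == k1, Error(e1)).Else(Error(e_else)))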
  934. void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
  935. uint32_t mismatched = 0;
  936. // Iterate over all the k_values in arg_value.tests[] and verify that
  937. // we see the expected return values from system calls, when we pass
  938. // the k_value as a parameter in a system call.
  939. for (int n = arg_value.size; n-- > 0;) {
  940. mismatched += arg_value.tests[n].k_value;
  941. args[arg_value.argno] = arg_value.tests[n].k_value;
  942. if (arg_value.tests[n].err) {
  943. VerifyErrno(sysno, args, arg_value.tests[n].err);
  944. } else {
  945. Verify(sysno, args, *arg_value.tests[n].arg_value);
  946. }
  947. }
  948. // Find a k_value that doesn't match any of the k_values in
  949. // arg_value.tests[]. In most cases, the current value of "mismatched"
  950. // would fit this requirement. But on the off-chance that it happens
  951. // to collide, we double-check.
  952. try_again:
  953. for (int n = arg_value.size; n-- > 0;) {
  954. if (mismatched == arg_value.tests[n].k_value) {
  955. ++mismatched;
  956. goto try_again;
  957. }
  958. }
  959. // Now verify that we see the expected return value from system calls,
  960. // if we pass a value that doesn't match any of the conditions (i.e. this
  961. // is testing the "else" clause of the conditions).
  962. args[arg_value.argno] = mismatched;
  963. if (arg_value.err) {
  964. VerifyErrno(sysno, args, arg_value.err);
  965. } else {
  966. Verify(sysno, args, *arg_value.arg_value);
  967. }
  968. // Reset args[arg_value.argno]. This is not technically needed, but it
  969. // makes it easier to reason about the correctness of our tests.
  970. args[arg_value.argno] = 0;
  971. }
  972. void VerifyErrno(int sysno, intptr_t* args, int err) {
  973. // We installed BPF filters that return different errno values
  974. // based on the system call number and the parameters that we decided
  975. // to pass in. Verify that this condition holds true.
  976. BPF_ASSERT(
  977. Syscall::Call(
  978. sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
  979. -err);
  980. }
  981. // Vector of ArgValue trees. These trees define all the possible boolean
  982. // expressions that we want to turn into a BPF filter program.
  983. std::vector<ArgValue*> arg_values_;
  984. // Don't increase these values. We are pushing the limits of the maximum
  985. // BPF program that the kernel will allow us to load. If the values are
  986. // increased too much, the test will start failing.
  987. #if defined(__aarch64__)
  988. static const int kNumTestCases = 30;
  989. #else
  990. static const int kNumTestCases = 40;
  991. #endif
  992. static const int kMaxFanOut = 3;
  993. static const int kMaxArgs = 6;
  994. };
  995. class EqualityStressTestPolicy : public Policy {
  996. public:
  997. explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {}
  998. EqualityStressTestPolicy(const EqualityStressTestPolicy&) = delete;
  999. EqualityStressTestPolicy& operator=(const EqualityStressTestPolicy&) = delete;
  1000. ~EqualityStressTestPolicy() override {}
  1001. ResultExpr EvaluateSyscall(int sysno) const override {
  1002. return aux_->Policy(sysno);
  1003. }
  1004. private:
  1005. raw_ptr<EqualityStressTest> aux_;
  1006. };
  1007. BPF_TEST(SandboxBPF,
  1008. EqualityTests,
  1009. EqualityStressTestPolicy,
  1010. EqualityStressTest /* (*BPF_AUX) */) {
  1011. BPF_AUX->VerifyFilter();
  1012. }
  1013. class EqualityArgumentWidthPolicy : public Policy {
  1014. public:
  1015. EqualityArgumentWidthPolicy() {}
  1016. EqualityArgumentWidthPolicy(const EqualityArgumentWidthPolicy&) = delete;
  1017. EqualityArgumentWidthPolicy& operator=(const EqualityArgumentWidthPolicy&) =
  1018. delete;
  1019. ~EqualityArgumentWidthPolicy() override {}
  1020. ResultExpr EvaluateSyscall(int sysno) const override;
  1021. };
  1022. ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
  1023. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  1024. if (sysno == __NR_uname) {
  1025. const Arg<int> option(0);
  1026. const Arg<uint32_t> arg32(1);
  1027. const Arg<uint64_t> arg64(1);
  1028. return Switch(option)
  1029. .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
  1030. #if __SIZEOF_POINTER__ > 4
  1031. .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
  1032. #endif
  1033. .Default(Error(3));
  1034. }
  1035. return Allow();
  1036. }
  1037. BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
  1038. BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
  1039. BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
  1040. #if __SIZEOF_POINTER__ > 4
  1041. // On 32bit machines, there is no way to pass a 64bit argument through the
  1042. // syscall interface. So, we have to skip the part of the test that requires
  1043. // 64bit arguments.
  1044. BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
  1045. BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
  1046. BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
  1047. BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
  1048. #endif
  1049. }
  1050. #if __SIZEOF_POINTER__ > 4
  1051. // On 32bit machines, there is no way to pass a 64bit argument through the
  1052. // syscall interface. So, we have to skip the part of the test that requires
  1053. // 64bit arguments.
  1054. BPF_DEATH_TEST_C(SandboxBPF,
  1055. EqualityArgumentUnallowed64bit,
  1056. DEATH_MESSAGE("Unexpected 64bit argument detected"),
  1057. EqualityArgumentWidthPolicy) {
  1058. Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
  1059. }
  1060. #endif
  1061. class EqualityWithNegativeArgumentsPolicy : public Policy {
  1062. public:
  1063. EqualityWithNegativeArgumentsPolicy() {}
  1064. EqualityWithNegativeArgumentsPolicy(
  1065. const EqualityWithNegativeArgumentsPolicy&) = delete;
  1066. EqualityWithNegativeArgumentsPolicy& operator=(
  1067. const EqualityWithNegativeArgumentsPolicy&) = delete;
  1068. ~EqualityWithNegativeArgumentsPolicy() override {}
  1069. ResultExpr EvaluateSyscall(int sysno) const override {
  1070. DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  1071. if (sysno == __NR_uname) {
  1072. // TODO(mdempsky): This currently can't be Arg<int> because then
  1073. // 0xFFFFFFFF will be treated as a (signed) int, and then when
  1074. // Arg::EqualTo casts it to uint64_t, it will be sign extended.
  1075. const Arg<unsigned> arg(0);
  1076. return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
  1077. }
  1078. return Allow();
  1079. }
  1080. };
  1081. BPF_TEST_C(SandboxBPF,
  1082. EqualityWithNegativeArguments,
  1083. EqualityWithNegativeArgumentsPolicy) {
  1084. BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
  1085. BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
  1086. BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
  1087. }
  1088. #if __SIZEOF_POINTER__ > 4
  1089. BPF_DEATH_TEST_C(SandboxBPF,
  1090. EqualityWithNegative64bitArguments,
  1091. DEATH_MESSAGE("Unexpected 64bit argument detected"),
  1092. EqualityWithNegativeArgumentsPolicy) {
  1093. // When expecting a 32bit system call argument, we look at the MSB of the
  1094. // 64bit value and allow both "0" and "-1". But the latter is allowed only
  1095. // if the LSB was negative. So, this death test should error out.
  1096. BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
  1097. }
  1098. #endif

class AllBitTestPolicy : public Policy {
 public:
  AllBitTestPolicy() {}
  AllBitTestPolicy(const AllBitTestPolicy&) = delete;
  AllBitTestPolicy& operator=(const AllBitTestPolicy&) = delete;
  ~AllBitTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static ResultExpr HasAllBits32(uint32_t bits);
  static ResultExpr HasAllBits64(uint64_t bits);
};

ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) {
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}

ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) {
  if (bits == 0) {
    return Error(1);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) == bits, Error(1)).Else(Error(0));
}
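// Reader's note on the helpers above: (arg & bits) == bits holds exactly when
// every bit in |bits| is also set in |arg|. With bits == 0 that comparison
// would be vacuously true, so both helpers return Error(1) up front instead of
// emitting a masked comparison against an all-zero mask.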

ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has all bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override uname(). We can make do with a
  // single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAllBits32(0x0))
        .Case(1, HasAllBits32(0x1))
        .Case(2, HasAllBits32(0x3))
        .Case(3, HasAllBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAllBits64(0x0))
        .Case(5, HasAllBits64(0x1))
        .Case(6, HasAllBits64(0x3))
        .Case(7, HasAllBits64(0x80000000))
        .Case(8, HasAllBits64(0x100000000ULL))
        .Case(9, HasAllBits64(0x300000000ULL))
        .Case(10, HasAllBits64(0x100000001ULL))
#endif
        .Default(Kill());
  }
  return Allow();
}

// Define a macro that performs tests using our test policy.
// NOTE: Not all of the arguments in this macro are actually used!
// They are here just to serve as documentation of the conditions
// implemented in the test policy.
// Most notably, "op" and "mask" are unused by the macro. If you want
// to make changes to these values, you will have to edit the
// test policy instead.
#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))

// Our uname() system call returns ErrorCode(1) for success and
// ErrorCode(0) for failure. Syscall::Call() turns this into an
// exit code of -1 or 0.
#define EXPECT_FAILURE 0
#define EXPECT_SUCCESS -1

// A couple of our tests behave differently on 32bit and 64bit systems, as
// there is no way for a 32bit system call to pass in a 64bit system call
// argument "arg".
// We expect these tests to succeed on 64bit systems, but to fail on 32bit
// systems.
#define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
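// For illustration, a line such as
//   BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);
// expands to nothing more than
//   BPF_ASSERT(Syscall::Call(__NR_uname, (1), (3)) == (-1));
// the ALLBITS32 / 0x1 columns merely restate what case 1 of AllBitTestPolicy
// already checks.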

BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
  // 32bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS);

  // 32bit test: all of 0x1
  BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);

  // 32bit test: all of 0x3
  BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS);

  // 32bit test: all of 0x80000000
  BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, 0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
  BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS);

  // 64bit test: all of 0x1
  BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS);

  // 64bit test: all of 0x3
  BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS);

  // 64bit test: all of 0x80000000
  BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: all of 0x100000000
  BITMASK_TEST( 8, 0x000000000LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x100000000LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x200000000LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x300000000LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x000000001LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x100000001LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x200000001LL, ALLBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x300000001LL, ALLBITS64, 0x100000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x300000000
  BITMASK_TEST( 9, 0x000000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x100000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x200000000LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x300000000LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x700000000LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x000000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x100000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x200000001LL, ALLBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x300000001LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x700000001LL, ALLBITS64, 0x300000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x100000001
  BITMASK_TEST(10, 0x000000000LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x000000001LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x100000000LL, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x100000001LL, ALLBITS64, 0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, -1L, ALLBITS64, 0x100000001, EXPT64_SUCCESS);
#endif
}

class AnyBitTestPolicy : public Policy {
 public:
  AnyBitTestPolicy() {}
  AnyBitTestPolicy(const AnyBitTestPolicy&) = delete;
  AnyBitTestPolicy& operator=(const AnyBitTestPolicy&) = delete;
  ~AnyBitTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static ResultExpr HasAnyBits32(uint32_t);
  static ResultExpr HasAnyBits64(uint64_t);
};

ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
  if (bits == 0) {
    return Error(0);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}

ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has any bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override uname(). We can make do with a
  // single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAnyBits32(0x0))
        .Case(1, HasAnyBits32(0x1))
        .Case(2, HasAnyBits32(0x3))
        .Case(3, HasAnyBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAnyBits64(0x0))
        .Case(5, HasAnyBits64(0x1))
        .Case(6, HasAnyBits64(0x3))
        .Case(7, HasAnyBits64(0x80000000))
        .Case(8, HasAnyBits64(0x100000000ULL))
        .Case(9, HasAnyBits64(0x300000000ULL))
        .Case(10, HasAnyBits64(0x100000001ULL))
#endif
        .Default(Kill());
  }
  return Allow();
}

BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
  // 32bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE);

  // 32bit test: any of 0x1
  BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS);

  // 32bit test: any of 0x3
  BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS);

  // 32bit test: any of 0x80000000
  BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, 0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE);

  // 64bit test: any of 0x1
  BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS);

  // 64bit test: any of 0x3
  BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS);

  // 64bit test: any of 0x80000000
  BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: any of 0x100000000
  BITMASK_TEST( 8, 0x000000000LL, ANYBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x100000000LL, ANYBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x200000000LL, ANYBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x300000000LL, ANYBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x000000001LL, ANYBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x100000001LL, ANYBITS64, 0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8, 0x200000001LL, ANYBITS64, 0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8, 0x300000001LL, ANYBITS64, 0x100000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x300000000
  BITMASK_TEST( 9, 0x000000000LL, ANYBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x100000000LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x200000000LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x300000000LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x700000000LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x000000001LL, ANYBITS64, 0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9, 0x100000001LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x200000001LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x300000001LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9, 0x700000001LL, ANYBITS64, 0x300000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x100000001
  BITMASK_TEST(10, 0x000000000LL, ANYBITS64, 0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10, 0x000000001LL, ANYBITS64, 0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST(10, 0x100000000LL, ANYBITS64, 0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST(10, 0x100000001LL, ANYBITS64, 0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST(10, 0xFFFFFFFFU, ANYBITS64, 0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST(10, -1L, ANYBITS64, 0x100000001, EXPECT_SUCCESS);
#endif
}

class MaskedEqualTestPolicy : public Policy {
 public:
  MaskedEqualTestPolicy() {}
  MaskedEqualTestPolicy(const MaskedEqualTestPolicy&) = delete;
  MaskedEqualTestPolicy& operator=(const MaskedEqualTestPolicy&) = delete;
  ~MaskedEqualTestPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
  static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);
};

ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
  const Arg<uint32_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}

ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
  const Arg<uint64_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}

ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
#if __SIZEOF_POINTER__ > 4
        .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
        .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
#endif
        .Default(Kill());
  }
  return Allow();
}

#define MASKEQ_TEST(rulenum, arg, expected_result) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
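// For illustration, MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS) expands to
//   BPF_ASSERT(Syscall::Call(__NR_uname, (0), (0x005500aa)) == (-1));
// and succeeds because rule 0 above is MaskedEqual32(0x00ff00ff, 0x005500aa),
// i.e. (0x005500aa & 0x00ff00ff) == 0x005500aa.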

BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
  // Allowed: 0x__55__aa
  MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
  MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // Allowed: 0x__55__aa________
  MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
  MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
  MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);

  // Allowed: 0x__55__aa__55__aa
  MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
  MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
  MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
#endif
}

intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
  if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
    // We expect to get called for an attempt to fork(). No need to log that
    // call. But if we ever get called for anything else, we want to verbosely
    // print as much information as possible.
    const char* msg = (const char*)aux;
    printf(
        "Clone() was called with unexpected arguments\n"
        " nr: %d\n"
        " 1: 0x%llX\n"
        " 2: 0x%llX\n"
        " 3: 0x%llX\n"
        " 4: 0x%llX\n"
        " 5: 0x%llX\n"
        " 6: 0x%llX\n"
        "%s\n",
        args.nr,
        (long long)args.args[0],
        (long long)args.args[1],
        (long long)args.args[2],
        (long long)args.args[3],
        (long long)args.args[4],
        (long long)args.args[5],
        msg);
  }
  return -EPERM;
}
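// Note: returning -EPERM from the trap handler above is what makes a blocked
// clone() appear to fail with EPERM from the caller's point of view;
// PthreadTest() below relies on exactly that return value.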

class PthreadPolicyEquality : public Policy {
 public:
  PthreadPolicyEquality() {}
  PthreadPolicyEquality(const PthreadPolicyEquality&) = delete;
  PthreadPolicyEquality& operator=(const PthreadPolicyEquality&) = delete;
  ~PthreadPolicyEquality() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;
};

ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // More recent versions of Android don't set CLONE_DETACHED anymore, so
    // the last case accounts for that.
    // The following policy is very strict. It only allows the exact masks
    // that we have seen in known implementations. It is probably somewhat
    // stricter than what we would want to do.
    const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                     CLONE_SIGHAND | CLONE_THREAD |
                                     CLONE_SYSVSEM | CLONE_SETTLS |
                                     CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                           CLONE_SIGHAND | CLONE_THREAD |
                                           CLONE_SYSVSEM;
    const Arg<unsigned long> flags(0);
    return Switch(flags)
        .CASES((kGlibcCloneMask, (kBaseAndroidCloneMask | CLONE_DETACHED),
                kBaseAndroidCloneMask),
               Allow())
        .Default(Trap(PthreadTrapHandler, "Unknown mask"));
  }
  return Allow();
}
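// A note on the CASES() list above: it is the bpf_dsl shorthand for giving
// several Switch() values the same result, so any one of the three clone()
// flag combinations is allowed and every other flag value hits the trap
// handler.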

class PthreadPolicyBitMask : public Policy {
 public:
  PthreadPolicyBitMask() {}
  PthreadPolicyBitMask(const PthreadPolicyBitMask&) = delete;
  PthreadPolicyBitMask& operator=(const PthreadPolicyBitMask&) = delete;
  ~PthreadPolicyBitMask() override {}

  ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
  static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);
};

BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) != 0;
}

BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) == bits;
}

ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // The following policy allows for either combination of flags, but it
    // is generally a little more conservative than strictly necessary. We
    // err on the side of being safe rather than sorry.
    // Very noticeably though, we disallow fork() (which is often just a
    // wrapper around clone()).
    const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
                                          CLONE_SIGHAND | CLONE_THREAD |
                                          CLONE_SYSVSEM;
    const unsigned long kFutexFlags =
        CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const unsigned long kNoopFlags = CLONE_DETACHED;
    const unsigned long kKnownFlags =
        kMandatoryFlags | kFutexFlags | kNoopFlags;

    const Arg<unsigned long> flags(0);
    return If(HasAnyBits(flags, ~kKnownFlags),
              Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
        .ElseIf(Not(HasAllBits(flags, kMandatoryFlags)),
                Trap(PthreadTrapHandler,
                     "Missing mandatory CLONE_XXX flags "
                     "when creating new thread"))
        .ElseIf(AllOf(Not(HasAllBits(flags, kFutexFlags)),
                      HasAnyBits(flags, kFutexFlags)),
                Trap(PthreadTrapHandler,
                     "Must set either all or none of the TLS and futex bits in "
                     "call to clone()"))
        .Else(Allow());
  }
  return Allow();
}

static void* ThreadFnc(void* arg) {
  ++*reinterpret_cast<int*>(arg);
  Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
  return nullptr;
}

static void PthreadTest() {
  // Attempt to start a joinable thread. This should succeed.
  pthread_t thread;
  int thread_ran = 0;
  BPF_ASSERT(!pthread_create(&thread, nullptr, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_join(thread, nullptr));
  BPF_ASSERT(thread_ran);

  // Attempt to start a detached thread. This should succeed.
  thread_ran = 0;
  pthread_attr_t attr;
  BPF_ASSERT(!pthread_attr_init(&attr));
  BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_attr_destroy(&attr));
  while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
         -EINTR) {
  }
  BPF_ASSERT(thread_ran);

  // Attempt to fork() a process using clone(). This should fail. We use the
  // same flags that glibc uses when calling fork(). But we don't actually
  // try calling the fork() implementation in the C run-time library, as
  // run-time libraries other than glibc might call __NR_fork instead of
  // __NR_clone, and that would introduce a bogus test failure.
  int pid;
  BPF_ASSERT(Syscall::Call(__NR_clone,
                           CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
                           0,
                           0,
                           &pid) == -EPERM);
}

BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
  PthreadTest();
}

BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
  PthreadTest();
}

// libc might not define these even though the kernel supports them.
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP 0x00000080
#endif

#ifdef PTRACE_EVENT_SECCOMP
#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
#else
// When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
// changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
// PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
// PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
// values here.
#define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
#endif
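// For readers unfamiliar with the encoding: at a PTRACE_EVENT_* stop,
// waitpid() reports a status for which WIFSTOPPED(status) is true,
// WSTOPSIG(status) is SIGTRAP, and the event number sits in bits 16 and up.
// IS_SECCOMP_EVENT() extracts those bits, which is how the tracer loop
// further down tells seccomp stops apart from ordinary signal stops.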

#if defined(__arm__)
#ifndef PTRACE_SET_SYSCALL
#define PTRACE_SET_SYSCALL 23
#endif
#endif

#if defined(__aarch64__)
#ifndef PTRACE_GETREGS
#if defined(__GLIBC__)
#define PTRACE_GETREGS static_cast<enum __ptrace_request>(12)
#else
#define PTRACE_GETREGS 12
#endif  // defined(__GLIBC__)
#endif  // !defined(PTRACE_GETREGS)
#endif  // defined(__aarch64__)

#if defined(__aarch64__)
#ifndef PTRACE_SETREGS
#if defined(__GLIBC__)
#define PTRACE_SETREGS static_cast<enum __ptrace_request>(13)
#else
#define PTRACE_SETREGS 13
#endif  // defined(__GLIBC__)
#endif  // !defined(PTRACE_SETREGS)
#endif  // defined(__aarch64__)

// Changes the syscall to run for a child being sandboxed using seccomp-bpf
// with PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped
// on PTRACE_EVENT_SECCOMP.
//
// regs should contain the current set of registers of the child, obtained
// using PTRACE_GETREGS.
//
// Depending on the architecture, this may modify regs, so the caller is
// responsible for committing these changes using PTRACE_SETREGS.
#if !defined(__arm__) && !defined(__aarch64__) && !defined(__mips__)
long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
#if defined(__arm__)
  // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
  // libc ptrace call as the request parameter is an enum, and
  // PTRACE_SET_SYSCALL may not be in the enum.
  return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
#else
  SECCOMP_PT_SYSCALL(*regs) = syscall_number;
  return 0;
#endif
}
#endif
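// On the architectures compiled in here (the #if above excludes ARM, AArch64
// and MIPS), the syscall number is rewritten directly in the saved registers
// and committed with PTRACE_SETREGS. Setting the syscall number to -1 makes
// the kernel skip the call altogether; the SeccompRetTrace test below relies
// on that when it fakes the return value of write() via SECCOMP_PT_RESULT.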

const uint16_t kTraceData = 0xcc;

class TraceAllPolicy : public Policy {
 public:
  TraceAllPolicy() {}
  TraceAllPolicy(const TraceAllPolicy&) = delete;
  TraceAllPolicy& operator=(const TraceAllPolicy&) = delete;
  ~TraceAllPolicy() override {}

  ResultExpr EvaluateSyscall(int system_call_number) const override {
    return Trace(kTraceData);
  }
};

SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
  if (!SandboxBPF::SupportsSeccompSandbox(
          SandboxBPF::SeccompLevel::SINGLE_THREADED)) {
    return;
  }

  // This test is disabled on arm due to a kernel bug.
  // See https://code.google.com/p/chromium/issues/detail?id=383977
#if defined(__arm__) || defined(__aarch64__)
  printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
#elif defined(__mips__)
  // TODO: Figure out how to support specificity of handling indirect syscalls
  // in this test and enable it.
  printf("This test is currently disabled on MIPS.");
#else
  pid_t pid = fork();
  BPF_ASSERT_NE(-1, pid);
  if (pid == 0) {
    pid_t my_pid = getpid();
    BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
    BPF_ASSERT_EQ(0, raise(SIGSTOP));

    SandboxBPF sandbox(std::make_unique<TraceAllPolicy>());
    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));

    // getpid is allowed.
    BPF_ASSERT_EQ(my_pid, sys_getpid());

    // write to stdout is skipped and returns a fake value.
    BPF_ASSERT_EQ(kExpectedReturnValue,
                  syscall(__NR_write, STDOUT_FILENO, "A", 1));

    // kill is rewritten to exit(kExpectedReturnValue).
    syscall(__NR_kill, my_pid, SIGKILL);

    // Should not be reached.
    BPF_ASSERT(false);
  }

  int status;
  BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
  BPF_ASSERT(WIFSTOPPED(status));

  BPF_ASSERT_NE(-1,
                ptrace(PTRACE_SETOPTIONS,
                       pid,
                       NULL,
                       reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
  BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  while (true) {
    BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
    if (WIFEXITED(status) || WIFSIGNALED(status)) {
      BPF_ASSERT(WIFEXITED(status));
      BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
      break;
    }

    if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
        !IS_SECCOMP_EVENT(status)) {
      BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
      continue;
    }

    unsigned long data;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
    BPF_ASSERT_EQ(kTraceData, data);

    regs_struct regs;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
    switch (SECCOMP_PT_SYSCALL(regs)) {
      case __NR_write:
        // Skip writes to stdout, make it return kExpectedReturnValue. Allow
        // writes to stderr so that BPF_ASSERT messages show up.
        if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
          BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
          SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
          BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        }
        break;
      case __NR_kill:
        // Rewrite to exit(kExpectedReturnValue).
        BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
        SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
        BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        break;
      default:
        // Allow all other syscalls.
        break;
    }

    BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  }
#endif
}

// Android does not expose pread64 or pwrite64.
#if !BUILDFLAG(IS_ANDROID)
bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) {
  while (count > 0) {
    const ssize_t transfered =
        HANDLE_EINTR(pwrite64(fd, buffer, count, offset));
    if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
      return false;
    }
    count -= transfered;
    buffer += transfered;
    offset += transfered;
  }
  return true;
}

bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) {
  while (count > 0) {
    const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset));
    if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
      return false;
    }
    count -= transfered;
    buffer += transfered;
    offset += transfered;
  }
  return true;
}

bool pread_64_was_forwarded = false;

class TrapPread64Policy : public Policy {
 public:
  TrapPread64Policy() {}
  TrapPread64Policy(const TrapPread64Policy&) = delete;
  TrapPread64Policy& operator=(const TrapPread64Policy&) = delete;
  ~TrapPread64Policy() override {}

  ResultExpr EvaluateSyscall(int system_call_number) const override {
    // Set the global environment for unsafe traps once.
    if (system_call_number == MIN_SYSCALL) {
      EnableUnsafeTraps();
    }

    if (system_call_number == __NR_pread64) {
      return UnsafeTrap(ForwardPreadHandler, nullptr);
    }
    return Allow();
  }

 private:
  static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
                                      void* aux) {
    BPF_ASSERT(args.nr == __NR_pread64);
    pread_64_was_forwarded = true;

    return SandboxBPF::ForwardSyscall(args);
  }
};

// pread(2) takes a 64-bit offset. On 32-bit systems, it will be split
// between two arguments. In this test, we make sure that ForwardSyscall() can
// forward it properly.
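// For reference, kLargeOffset below is (uint64_t{1} << 32) | 0xBEEF, i.e.
// 0x10000BEEF, an offset that does not fit in 32 bits, so the test only
// passes if the high half of the offset survives the round trip through the
// trap handler and ForwardSyscall().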
BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
  ScopedTemporaryFile temp_file;
  const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
  const char kTestString[] = "This is a test!";
  BPF_ASSERT(FullPwrite64(
      temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));

  char read_test_string[sizeof(kTestString)] = {0};
  BPF_ASSERT(FullPread64(temp_file.fd(),
                         read_test_string,
                         sizeof(read_test_string),
                         kLargeOffset));
  BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
  BPF_ASSERT(pread_64_was_forwarded);
}
#endif  // !BUILDFLAG(IS_ANDROID)

void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
  base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);

  // Wait for the main thread to signal that the filter has been applied.
  if (!event->IsSignaled()) {
    event->Wait();
  }
  BPF_ASSERT(event->IsSignaled());

  DenylistNanosleepPolicy::AssertNanosleepFails();
  return nullptr;
}

SANDBOX_TEST(SandboxBPF, Tsync) {
  const bool supports_multi_threaded = SandboxBPF::SupportsSeccompSandbox(
      SandboxBPF::SeccompLevel::MULTI_THREADED);
// On Chrome OS tsync is mandatory.
#if BUILDFLAG(IS_CHROMEOS_ASH)
  if (base::SysInfo::IsRunningOnChromeOS()) {
    BPF_ASSERT_EQ(true, supports_multi_threaded);
  }
// else a Chrome OS build not running on a Chrome OS device e.g. Chrome bots.
// In this case fall through.
#endif
  if (!supports_multi_threaded) {
    return;
  }

  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);

  // Create a thread on which to invoke the blocked syscall.
  pthread_t thread;
  BPF_ASSERT_EQ(
      0, pthread_create(&thread, nullptr, &TsyncApplyToTwoThreadsFunc, &event));

  // Test that nanosleep succeeds.
  const struct timespec ts = {0, 0};
  BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));

  // Engage the sandbox.
  SandboxBPF sandbox(std::make_unique<DenylistNanosleepPolicy>());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED));

  // This thread should have the filter applied as well.
  DenylistNanosleepPolicy::AssertNanosleepFails();

  // Signal the condition to invoke the system call.
  event.Signal();

  // Wait for the thread to finish.
  BPF_ASSERT_EQ(0, pthread_join(thread, nullptr));
}

class AllowAllPolicy : public Policy {
 public:
  AllowAllPolicy() {}
  AllowAllPolicy(const AllowAllPolicy&) = delete;
  AllowAllPolicy& operator=(const AllowAllPolicy&) = delete;
  ~AllowAllPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override { return Allow(); }
};

SANDBOX_DEATH_TEST(
    SandboxBPF,
    StartMultiThreadedAsSingleThreaded,
    DEATH_MESSAGE(
        ThreadHelpers::GetAssertSingleThreadedErrorMessageForTests())) {
  base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
  BPF_ASSERT(thread.Start());
  SandboxBPF sandbox(std::make_unique<AllowAllPolicy>());
  BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED));
}

// A stub handler for the UnsafeTrap. Never called.
intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
  return -1;
}

class UnsafeTrapWithCondPolicy : public Policy {
 public:
  UnsafeTrapWithCondPolicy() {}
  UnsafeTrapWithCondPolicy(const UnsafeTrapWithCondPolicy&) = delete;
  UnsafeTrapWithCondPolicy& operator=(const UnsafeTrapWithCondPolicy&) = delete;
  ~UnsafeTrapWithCondPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    setenv(kSandboxDebuggingEnv, "t", 0);
    Die::SuppressInfoMessages(true);

    if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
      return Allow();
    if (IsSyscallForTestHarness(sysno))
      return Allow();

    switch (sysno) {
      case __NR_uname: {
        const Arg<uint32_t> arg(0);
        return If(arg == 0, Allow()).Else(Error(EPERM));
      }
      case __NR_setgid: {
        const Arg<uint32_t> arg(0);
        return Switch(arg)
            .Case(100, Error(ENOMEM))
            .Case(200, Error(ENOSYS))
            .Default(Error(EPERM));
      }
      case __NR_close:
        return Allow();
      case __NR_getppid:
        return UnsafeTrap(NoOpHandler, nullptr);
      default:
        return Error(EPERM);
    }
  }
};

BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
  BPF_ASSERT_EQ(EFAULT, errno);
  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
  BPF_ASSERT_EQ(EPERM, errno);
  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
  BPF_ASSERT_EQ(ENOMEM, errno);
  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
  BPF_ASSERT_EQ(ENOSYS, errno);
  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
  BPF_ASSERT_EQ(EPERM, errno);
}

}  // namespace
}  // namespace bpf_dsl
}  // namespace sandbox