kfence_test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for KFENCE memory safety error detector. Since the interface with
 * which KFENCE's reports are obtained is via the console, this is the output we
 * should verify. Each test case checks for the presence (or absence) of
 * generated reports. Relies on the 'console' tracepoint to capture reports as
 * they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include "kfence.h"

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * This is a KFENCE report, and it is related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn;                    /* Function pointer to expected function where access occurred. */
	char *addr;                  /* Address at which the bad access occurred. */
	bool is_write;               /* Is the access a write. */
};
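
/* String used for the access type ("read" or "write") in the expected report text. */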
static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);

	return ret;
}

/* ===== Test cases ===== */
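
/* Value stored in test->priv by test_init() to request a dedicated test_cache. */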
#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}
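
/* Destroy the cache created by setup_test_cache(), if any. */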
static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}
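
/* Alignment of the kmalloc cache that would serve an allocation of @size. */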
static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * If this should be a KFENCE allocation, and on which side the allocation and
 * the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY,   /* KFENCE, any side. */
	ALLOCATE_LEFT,  /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE,  /* No KFENCE allocation. */
};

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct page *page = virt_to_head_page(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}
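
/* Test out-of-bounds reads on both sides of a KFENCE-guarded allocation. */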
static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}
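
/* Test an out-of-bounds write immediately preceding the allocation. */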
static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}
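
/* Test a read from an object after it has been freed. */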
static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
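
/* Test that freeing an object twice is reported as an invalid free. */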
static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
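
/* Test that freeing a pointer not at the start of an object is reported. */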
static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}
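
/* Test that out-of-bounds writes next to the object are reported as corruption on free. */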
static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
		return;
	/* Assume it hasn't been disabled on command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
		kunit_warn(test, "skipping ... would take too long\n");
		return;
	}

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}
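
/* Test access to an address inside the KFENCE pool that does not belong to any object. */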
static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill to extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to provide arguments to tests, and we encode
 * additional info in the name. Set up 2 tests per test case, one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)					\
	{ .run_case = test_name, .name = #test_name },			\
	{ .run_case = test_name, .name = #test_name "-memcache" }

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};

static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };
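
/* Called for every known tracepoint; attaches probe_console to the 'console' tracepoint. */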
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");