test_kasan.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

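/*
 * Generic KASAN detects out-of-bounds accesses with byte precision; the
 * tag-based modes only detect them at KASAN_GRANULE_SIZE granularity.
 * OOB_TAG_OFF shifts a deliberate bad access by one granule so that it is
 * reported in all modes.
 */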
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&			\
	    !kasan_async_mode_enabled())			\
		migrate_disable();				\
	WRITE_ONCE(fail_data.report_expected, true);		\
	WRITE_ONCE(fail_data.report_found, false);		\
	kunit_add_named_resource(test,				\
				 NULL,				\
				 NULL,				\
				 &resource,			\
				 "kasan_data", &fail_data);	\
	barrier();						\
	expression;						\
	barrier();						\
	if (kasan_async_mode_enabled())				\
		kasan_force_async_fault();			\
	barrier();						\
	KUNIT_EXPECT_EQ(test,					\
			READ_ONCE(fail_data.report_expected),	\
			READ_ONCE(fail_data.report_found));	\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&			\
	    !kasan_async_mode_enabled()) {			\
		if (READ_ONCE(fail_data.report_found))		\
			kasan_enable_tagging_sync();		\
		migrate_enable();				\
	}							\
} while (0)

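/*
 * Illustrative usage only (a minimal sketch; the real tests below follow the
 * same pattern):
 *
 *	static void example_oob(struct kunit *test)
 *	{
 *		char *ptr = kmalloc(32, GFP_KERNEL);
 *
 *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *		KUNIT_EXPECT_KASAN_FAIL(test, ptr[32] = 'x');
 *		kfree(ptr);
 *	}
 */
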
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)

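/*
 * The kmalloc_*oob_* tests below check that out-of-bounds accesses just past
 * (or just before) a kmalloc()ed buffer are reported.
 */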
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

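/*
 * The krealloc_* helpers below grow (or shrink) a buffer with krealloc() and
 * then probe the boundary: offsets within the new size must be accessible,
 * while offsets past it must trigger a KASAN report.
 */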
static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
			round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
			round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

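/*
 * The kmalloc_oob_memset_* tests below check that instrumented memset()s of
 * 2, 4, 8, and 16 bytes that straddle the end of the object are reported.
 */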
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

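/*
 * Check that an invalid size passed to memmove() is caught: invalid_size is
 * -2, which converts to a huge size_t value, so the copy runs far out of
 * bounds.
 */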
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

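/*
 * Check that freeing an object through an alias obtained via virt_to_page()
 * and page_address() works without a false-positive KASAN report.
 */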
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

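/* Same check, but with the pointer taken through a virt_to_phys()/phys_to_virt() round trip. */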
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

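/* Check that freeing the same kmem_cache object twice is reported. */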
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only one invalid access (less spam in dmesg).
	 * For that, ptr needs to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

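/*
 * Helpers for the bitops tests below: every instrumented bit operation is
 * expected to produce a KASAN report, since nr points outside the allocation
 * passed via addr.
 */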
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/* The calls below try to access a bit beyond the allocated memory. */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

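/* Check that kfree_sensitive() (previously kzfree()) detects a double-free. */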
static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");