kasan_init.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>
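
/*
 * Early allocation state (a reading of the code below, not original
 * documentation): segment_pos/segment_low bound a pool of 1 MB segments,
 * used for shadow memory when EDAT large pages are available, while
 * pgalloc_pos/pgalloc_low bound a pool of 4 KB pages used for page tables
 * and, without EDAT, for shadow pages. Both pools are carved from the top
 * of detected memory and grow downwards; kasan_early_init() sets up the
 * cursors before any allocation happens.
 */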
unsigned long kasan_vmax;
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;
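
/*
 * Translate a kernel address into the address of its shadow byte; under
 * generic KASAN every 8 bytes of memory are tracked by one shadow byte.
 */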
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}
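
/*
 * Hand out 1 MB segments (_SEGMENT_SIZE on s390) from the segment pool,
 * moving the cursor down; panic if the pool is exhausted.
 */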
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}
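
/*
 * An s390 page table (_PAGE_TABLE_SIZE) is half a 4 KB page, so each
 * allocated page yields two pte tables; the BUILD_BUG_ON below guards
 * that assumption, and the leftover half is kept for the next call.
 */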
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
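
/*
 * How a range should be populated (summarized from the code below):
 *  POPULATE_ONE2ONE     - identity mapping, entries point at the address
 *                         itself;
 *  POPULATE_MAP         - back the range with freshly allocated, zeroed
 *                         shadow pages or segments;
 *  POPULATE_ZERO_SHADOW - map everything read-only to the shared zero
 *                         shadow page (memory kasan does not track);
 *  POPULATE_SHALLOW     - allocate only the top-level tables and skip the
 *                         rest, so vmalloc/modules shadow can be populated
 *                         on demand later (KASAN_VMALLOC).
 */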
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};

static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!has_nx || mode == POPULATE_ONE2ONE) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	/*
	 * The first 1MB of 1:1 mapping is mapped with 4KB pages
	 */
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + PUD_SIZE) & PUD_MASK;
			continue;
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir,
						     kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					pmd_val(*pm_dir) = __pa(page) | sgt_prot;
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}
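
/*
 * Install the new top-level table: build an ASCE (address-space-control
 * element) from the pgd and load it into control registers 1, 7 and 13,
 * i.e. the primary, secondary and home address spaces.
 */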
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}
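
/*
 * Turn on dynamic address translation by setting the DAT bit in the
 * current PSW mask and switching to home-space mode, where the kernel
 * mapping set up above lives.
 */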
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}
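
/*
 * Probe the machine facilities used above: facility 8 is EDAT-1 (large
 * segment mappings, enabled via control register 0 bit 23) and facility
 * 130 is instruction-execution protection, which backs _PAGE_NOEXEC
 * (enabled via control register 0 bit 20).
 */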
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

static bool __init has_uv_sec_stor_limit(void)
{
	/*
	 * keep these conditions in line with setup_uv()
	 */
	if (!is_prot_virt_host())
		return false;
	if (is_prot_virt_guest())
		return false;
	if (!test_facility(158))
		return false;
	return !!uv_info.max_sec_stor_addr;
}
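
/*
 * Called very early, before paging is enabled: builds an early page table
 * hierarchy that identity-maps all detected memory, maps shadow for it
 * (1/8 of the covered address space), points everything else at the zero
 * shadow page, then switches on DAT and unfreezes kasan checks.
 */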
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long vmax_unlimited;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION1_SIZE;
		if (has_uv_sec_stor_limit())
			kasan_vmax = min(vmax_unlimited, uv_info.max_sec_stor_addr);
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = kasan_vmax = vmax_unlimited = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
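
	/*
	 * Generic KASAN shadows memory at a 1:8 ratio
	 * (KASAN_SHADOW_SCALE_SHIFT == 3), so covering memsize bytes of ram
	 * takes memsize / 8 bytes of shadow.
	 */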
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |                |
	 * |                 |/    |     kasan      |
	 * +- shadow start --+     |     zero       |
	 * | 1/8 addr space  |     |     page       |
	 * +- shadow end    -+     |    mapping     |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area  -+ \   |                |
	 * | vmalloc_size    |  \  |                |
	 * +- modules vaddr -+   \ +----------------+
	 * | 2Gb             |    \|    unmapped    | allocated per module
	 * +-----------------+     +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |     kasan      |
	 * |                 |/    |     zero       |
	 * +- shadow start --+     |     page       |
	 * | 1/8 addr space  |     |    mapping     |
	 * +- shadow end    -+     |  (untracked)   |
	 * | ... gap ...     |\    |                |
	 * +- vmalloc area  -+ \   +- vmalloc area -+
	 * | vmalloc_size    |  \  |shallow populate|
	 * +- modules vaddr -+   \ +- modules area -+
	 * | 2Gb             |    \|shallow populate|
	 * +-----------------+     +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = kasan_vmax - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_mem_end = kasan_vmax - vmalloc_size - MODULES_LEN;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
					     __sha(kasan_vmax), POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	kasan_early_vmemmap_populate(__sha(kasan_vmax),
				     __sha(vmax_unlimited),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables in
	 * early_pg_dir, while swapper_pg_dir has just been initialized with
	 * the identity mapping. Carry the shadow memory region over from
	 * early_pg_dir to swapper_pg_dir.
	 */
	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}
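
/*
 * The page tables allocated for the early identity mapping occupy the
 * range from pgalloc_pos up to the pgalloc_freeable snapshot taken above;
 * once the final page tables are in place they are no longer needed and
 * can be returned to memblock.
 */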
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}