kasan_init.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];
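
/*
 * Runs before the final page tables exist. Every entry covering the
 * shadow region [KASAN_SHADOW_START, KASAN_SHADOW_END) is pointed at
 * the shared kasan_early_shadow_{pte,pmd} tables, which map a single
 * zero page, so shadow loads are valid (and read as "not poisoned")
 * until kasan_init() builds the real shadow memory.
 */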
asmlinkage void __init kasan_early_init(void)
{
        uintptr_t i;
        pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
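
        /* Map every early shadow PTE to the shared zero page. */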
        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
                        pfn_pte(virt_to_pfn(kasan_early_shadow_page),
                                PAGE_KERNEL));
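
        /* Point every early shadow PMD entry at that PTE table. */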
        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
                        pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
                                __pgprot(_PAGE_TABLE)));
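
        /*
         * Wire one PGD entry per PGDIR_SIZE step of the shadow range in
         * the early page table to the shared PMD table.
         */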
        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
                                __pgprot(_PAGE_TABLE)));

        /* Mirror the same early shadow mappings into swapper_pg_dir. */
        pgd = pgd_offset_k(KASAN_SHADOW_START);

        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
                                __pgprot(_PAGE_TABLE)));

        local_flush_tlb_all();
}
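
/*
 * Back the shadow range [start, end) with real memory: allocate the
 * backing pages and the PTE/PMD tables that map them from memblock,
 * then splice the result into swapper_pg_dir. n_ptes and n_pmds round
 * the page count up to whole tables so the table allocations always
 * cover the range.
 */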
static void __init populate(void *start, void *end)
{
        unsigned long i, offset;
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
        unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
        unsigned long n_ptes =
                ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
        unsigned long n_pmds =
                ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

        pte_t *pte =
                memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
        pmd_t *pmd =
                memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        pgd_t *pgd = pgd_offset_k(vaddr);
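
        /* Allocate a fresh physical page for each shadow page and map it. */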
        for (i = 0; i < n_pages; i++) {
                phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

                set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
        }
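
        /*
         * Link the new PTE tables into the PMD tables, then the PMD
         * tables into the kernel PGD, one table every PTRS_PER_* entries.
         */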
        for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
                set_pmd(&pmd[i],
                        pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
                                __pgprot(_PAGE_TABLE)));

        for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
                set_pgd(&pgd[i],
                        pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                                __pgprot(_PAGE_TABLE)));
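
        /* Flush stale translations, then zero the freshly mapped shadow. */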
        local_flush_tlb_all();
        memset(start, 0, end - start);
}
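
/*
 * Build the real shadow memory. With generic KASAN the shadow byte for
 * an address lives at (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET, i.e. one shadow byte tracks an 8-byte granule.
 * Regions that never correspond to kernel memory keep the early
 * zero-page mapping; every real memory range gets writable shadow via
 * populate().
 */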
void __init kasan_init(void)
{
        phys_addr_t _start, _end;
        u64 i;

        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
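
        /* Populate writable shadow for each physical range memblock knows about. */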
        for_each_mem_range(i, &_start, &_end) {
                void *start = (void *)__va(_start);
                void *end = (void *)__va(_end);

                if (start >= end)
                        break;

                populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        }
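
        /*
         * Remap the shared early shadow page read-only (present, readable,
         * accessed, but not writable) and clear it: regions still backed by
         * it must read as unpoisoned and must never be written. init_task
         * starts with kasan_depth = 1 to suppress reports during early
         * boot; dropping it to 0 switches KASAN reporting on.
         */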
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               __pgprot(_PAGE_PRESENT | _PAGE_READ |
                                        _PAGE_ACCESSED)));

        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        init_task.kasan_depth = 0;
}