patch.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>

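/* Arguments handed to the stop_machine() callback in patch_text(). */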
struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
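/*
 * Map the page containing @addr through a text-poke fixmap slot so the
 * (normally read-only) kernel or module text can be written.
 */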
/*
 * fix_to_virt(, idx) needs a constant index (not a value living in a
 * register such as a0), otherwise the BUILD_BUG_ON fires with
 * "idx >= __end_of_fixed_addresses". Hence the '__always_inline' and
 * 'const unsigned int fixmap' here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

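/*
 * Copy @len instruction bytes to @addr through the text-poke fixmap,
 * mapping a second page when the write crosses a page boundary.
 */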
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * The caller is expected to hold text_mutex before getting here,
	 * so no extra locking is needed to keep this safe across cores.
	 */
	//lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

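/*
 * Patch the text at @addr and flush the instruction cache. "nosync" means
 * no stop_machine() rendezvous is performed here, so synchronization with
 * other harts is left to the caller.
 */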
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

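/*
 * stop_machine() callback: the first CPU to arrive performs the patch and
 * then bumps cpu_count a second time to release the others, which spin
 * until the count exceeds num_online_cpus() and finish with a barrier.
 */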
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret = patch_text_nosync(patch->addr, &patch->insn,
					GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

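/*
 * Patch a single (possibly compressed) instruction at @addr with every
 * online CPU parked in patch_text_cb() via stop_machine().
 */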
int patch_text(void *addr, u32 insn)
{
	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(patch_text_cb,
				       &patch, cpu_online_mask);
}
NOKPROBE_SYMBOL(patch_text);