// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
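
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * code patchers are expected to bracket text modifications with this
 * lock, roughly:
 *
 *        mutex_lock(&text_mutex);
 *        ...modify kernel text, e.g. via the arch's text_poke helpers...
 *        mutex_unlock(&text_mutex);
 *
 * Since holders may sleep, this is a mutex rather than a spinlock.
 */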

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
        if (main_extable_sort_needed &&
            &__stop___ex_table > &__start___ex_table) {
                pr_notice("Sorting __ex_table...\n");
                sort_extable(__start___ex_table, __stop___ex_table);
        }
}
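
/*
 * Context: lookups in this table are done by binary search, so the
 * entries must be ordered by address. The build normally pre-sorts
 * __ex_table and clears main_extable_sort_needed, making the
 * boot-time sort above a no-op.
 */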

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
        return search_extable(__start___ex_table,
                              __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_kernel_exception_table(addr);
        if (!e)
                e = search_module_extables(addr);
        if (!e)
                e = search_bpf_extables(addr);
        return e;
}
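
/*
 * Illustrative sketch (hypothetical; helper names vary by arch): a
 * kernel-mode fault handler consults these tables and, on a hit,
 * resumes execution at the entry's fixup code instead of oopsing:
 *
 *        const struct exception_table_entry *fixup;
 *
 *        fixup = search_exception_tables(instruction_pointer(regs));
 *        if (fixup)
 *                instruction_pointer_set(regs, extable_fixup(fixup));
 *
 * Here extable_fixup() stands in for the arch's helper that turns an
 * entry into its fixup address.
 */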

int init_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
                return 1;
        return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_stext &&
            addr < (unsigned long)_etext)
                return 1;

        if (system_state < SYSTEM_RUNNING &&
            init_kernel_text(addr))
                return 1;
        return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: on some architectures this may return true for core RODATA and
 * false on others, but it always returns true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
        if (addr >= (unsigned long)_sdata &&
            addr < (unsigned long)_edata)
                return 1;
        return 0;
}

int __kernel_text_address(unsigned long addr)
{
        if (kernel_text_address(addr))
                return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
         * backtraces (such as lockdep traces).
         *
         * Since we are after the module-symbols check, there's
         * no danger of address overlap:
         */
        if (init_kernel_text(addr))
                return 1;
        return 0;
}
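
/*
 * Illustrative sketch (hypothetical caller): stack dumpers typically
 * use this predicate to decide whether a word found on the stack looks
 * like a return address worth printing, roughly:
 *
 *        if (__kernel_text_address(addr))
 *                print_ip_sym(KERN_DEFAULT, addr);
 */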

int kernel_text_address(unsigned long addr)
{
        bool no_rcu;
        int ret = 1;

        if (core_kernel_text(addr))
                return 1;

        /*
         * If a stack dump happens while RCU is not watching, then
         * RCU needs to be told to start watching again. This can
         * happen either through tracing that triggers a stack trace,
         * or through a WARN() issued while coming back from idle or
         * while a CPU is being brought online or offline.
         *
         * is_module_text_address() as well as the kprobe slots,
         * is_bpf_text_address() and is_bpf_image_address() require
         * RCU to be watching.
         */
        no_rcu = !rcu_is_watching();

        /* Treat this like an NMI as it can happen anywhere */
        if (no_rcu)
                rcu_nmi_enter();

        if (is_module_text_address(addr))
                goto out;
        if (is_ftrace_trampoline(addr))
                goto out;
        if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
                goto out;
        if (is_bpf_text_address(addr))
                goto out;
        ret = 0;
out:
        if (no_rcu)
                rcu_nmi_exit();

        return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers are only
 * descriptors: tokens pointing at data that holds the real function
 * address. To decide whether a function pointer lies in kernel text,
 * we therefore need to do some special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
        unsigned long addr;

        addr = (unsigned long) dereference_function_descriptor(ptr);
        if (core_kernel_text(addr))
                return 1;
        return is_module_text_address(addr);
}
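
/*
 * Usage sketch (hypothetical caller and field name): debugging code
 * validating a callback before invoking it could do:
 *
 *        WARN_ON_ONCE(!func_ptr_is_kernel_text(cb->func));
 *
 * On descriptor-based ABIs (e.g. PPC64 ELFv1) the raw pointer value
 * lands in a data section, so a plain text-range check would wrongly
 * reject it without the dereference above.
 */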