// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);
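
/*
 * Find the klp_ops whose func_stack tracks the given original function.
 * Returns NULL if the function is not currently patched.
 */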
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
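
/*
 * klp_ftrace_handler() - ftrace handler installed on every patched function.
 *
 * Picks the most recently patched variant from the top of the func_stack and
 * redirects execution to it by rewriting the saved pc. During a transition,
 * the per-task patch state decides whether the new variant, the previously
 * patched variant, or the original function is used.
 */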
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
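
/*
 * Remove the function from its func_stack. If it was the last function on
 * the stack, also unregister the ftrace handler, clear the ftrace filter
 * and free the klp_ops.
 */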
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
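
/*
 * Add the function to the func_stack of its original function. For the first
 * patch of a given function, this also allocates the klp_ops, sets the ftrace
 * filter on the function's ftrace location and registers the handler.
 */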
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
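
/*
 * Unpatch every patched function in the object. With nops_only, only the
 * nop functions are unpatched and statically defined objects keep their
 * patched state.
 */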
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}
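
/*
 * Patch every function in the object. On failure, roll back the functions
 * that were already patched and return the error.
 */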
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}
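
/*
 * Unpatch only the nop functions, which are added dynamically by atomic
 * replace patches.
 */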
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}