error-inject.c

// SPDX-License-Identifier: GPL-2.0
// error-inject.c: Function-level error injection table
#include <linux/error-injection.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
static DEFINE_MUTEX(ei_mutex);
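
/*
 * Illustrative note (not part of the original file): a function ends up on
 * this whitelist by being annotated at its definition site with the
 * ALLOW_ERROR_INJECTION() macro (see <asm-generic/error-injection.h>), which
 * emits a struct error_injection_entry into the whitelist section. A minimal
 * sketch, with example_update() as a purely hypothetical function:
 *
 *        noinline int example_update(struct foo *f)
 *        {
 *                ...
 *        }
 *        ALLOW_ERROR_INJECTION(example_update, ERRNO);
 */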

struct ei_entry {
        struct list_head list;
        unsigned long start_addr;
        unsigned long end_addr;
        int etype;
        void *priv;
};

bool within_error_injection_list(unsigned long addr)
{
        struct ei_entry *ent;
        bool ret = false;

        mutex_lock(&ei_mutex);
        list_for_each_entry(ent, &error_injection_list, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr) {
                        ret = true;
                        break;
                }
        }
        mutex_unlock(&ei_mutex);
        return ret;
}
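
/*
 * Usage sketch (illustrative, not from this file): callers such as the
 * kprobe-based fail_function injector and the BPF attach path check a
 * target address before allowing a return-value override, roughly:
 *
 *        if (!within_error_injection_list(addr))
 *                return -EINVAL;
 */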

int get_injectable_error_type(unsigned long addr)
{
        struct ei_entry *ent;

        list_for_each_entry(ent, &error_injection_list, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr)
                        return ent->etype;
        }
        return EI_ETYPE_NONE;
}
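
/*
 * Illustrative note: the returned type constrains what an injector may force
 * the function to return; the exact policy lives in the injection frameworks,
 * but a hedged sketch of interpreting the result looks like:
 *
 *        int etype = get_injectable_error_type(addr);
 *
 *        if (etype == EI_ETYPE_NONE)        // not whitelisted
 *                return -EINVAL;
 *        // EI_ETYPE_NULL       -> may force a NULL return
 *        // EI_ETYPE_ERRNO      -> may force a negative errno return
 *        // EI_ETYPE_ERRNO_NULL -> may force either of the above
 */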

/*
 * Lookup and populate the error_injection_list.
 *
 * For safety reasons we only allow certain functions to be overridden with
 * bpf_error_injection, so we need to populate the list of the symbols that
 * have been marked as safe for overriding.
 */
static void populate_error_injection_list(struct error_injection_entry *start,
                                          struct error_injection_entry *end,
                                          void *priv)
{
        struct error_injection_entry *iter;
        struct ei_entry *ent;
        unsigned long entry, offset = 0, size = 0;

        mutex_lock(&ei_mutex);
        for (iter = start; iter < end; iter++) {
                entry = arch_deref_entry_point((void *)iter->addr);

                if (!kernel_text_address(entry) ||
                    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
                        pr_err("Failed to find error inject entry at %p\n",
                                (void *)entry);
                        continue;
                }

                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        break;
                ent->start_addr = entry;
                ent->end_addr = entry + size;
                ent->etype = iter->etype;
                ent->priv = priv;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &error_injection_list);
        }
        mutex_unlock(&ei_mutex);
}

/* Markers of the _error_injection_whitelist section */
extern struct error_injection_entry __start_error_injection_whitelist[];
extern struct error_injection_entry __stop_error_injection_whitelist[];

static void __init populate_kernel_ei_list(void)
{
        populate_error_injection_list(__start_error_injection_whitelist,
                                      __stop_error_injection_whitelist,
                                      NULL);
}
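
/*
 * Illustrative note (an assumption about the build, not part of this file):
 * the __start/__stop markers above are expected to be provided by the kernel
 * linker script, which brackets the _error_injection_whitelist section
 * roughly like:
 *
 *        __start_error_injection_whitelist = .;
 *        KEEP(*(_error_injection_whitelist))
 *        __stop_error_injection_whitelist = .;
 */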

#ifdef CONFIG_MODULES
static void module_load_ei_list(struct module *mod)
{
        if (!mod->num_ei_funcs)
                return;

        populate_error_injection_list(mod->ei_funcs,
                                      mod->ei_funcs + mod->num_ei_funcs, mod);
}

static void module_unload_ei_list(struct module *mod)
{
        struct ei_entry *ent, *n;

        if (!mod->num_ei_funcs)
                return;

        mutex_lock(&ei_mutex);
        list_for_each_entry_safe(ent, n, &error_injection_list, list) {
                if (ent->priv == mod) {
                        list_del_init(&ent->list);
                        kfree(ent);
                }
        }
        mutex_unlock(&ei_mutex);
}

/* Module notifier callback, checking the error injection table on the module */
static int ei_module_callback(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct module *mod = data;

        if (val == MODULE_STATE_COMING)
                module_load_ei_list(mod);
        else if (val == MODULE_STATE_GOING)
                module_unload_ei_list(mod);

        return NOTIFY_DONE;
}

static struct notifier_block ei_module_nb = {
        .notifier_call = ei_module_callback,
        .priority = 0
};

static __init int module_ei_init(void)
{
        return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
#define module_ei_init()        (0)
#endif
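
/*
 * Illustrative note: mod->ei_funcs and mod->num_ei_funcs are assumed to be
 * filled in by the module loader from the module's own
 * _error_injection_whitelist section, roughly along the lines of
 *
 *        mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
 *                                     sizeof(*mod->ei_funcs),
 *                                     &mod->num_ei_funcs);
 *
 * so a module using ALLOW_ERROR_INJECTION() has its entries added at
 * MODULE_STATE_COMING and removed again at MODULE_STATE_GOING via the
 * notifier above.
 */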

/*
 * error_injection/list -- shows which functions can be overridden for
 * error injection.
 */
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&ei_mutex);
        return seq_list_start(&error_injection_list, *pos);
}

static void ei_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&ei_mutex);
}

static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &error_injection_list, pos);
}

static const char *error_type_string(int etype)
{
        switch (etype) {
        case EI_ETYPE_NULL:
                return "NULL";
        case EI_ETYPE_ERRNO:
                return "ERRNO";
        case EI_ETYPE_ERRNO_NULL:
                return "ERRNO_NULL";
        default:
                return "(unknown)";
        }
}

static int ei_seq_show(struct seq_file *m, void *v)
{
        struct ei_entry *ent = list_entry(v, struct ei_entry, list);

        seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
                   error_type_string(ent->etype));
        return 0;
}

static const struct seq_operations ei_seq_ops = {
        .start = ei_seq_start,
        .next  = ei_seq_next,
        .stop  = ei_seq_stop,
        .show  = ei_seq_show,
};

static int ei_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &ei_seq_ops);
}

static const struct file_operations debugfs_ei_ops = {
        .open    = ei_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init ei_debugfs_init(void)
{
        struct dentry *dir, *file;

        dir = debugfs_create_dir("error_injection", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}
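
/*
 * Usage sketch (illustrative): with debugfs mounted at its usual location,
 * the whitelist can be read from userspace as
 *
 *        # cat /sys/kernel/debug/error_injection/list
 *        example_update    ERRNO
 *
 * where the sample output line assumes the hypothetical example_update()
 * annotation sketched near the top of this file.
 */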

static int __init init_error_injection(void)
{
        populate_kernel_ei_list();

        if (!module_ei_init())
                ei_debugfs_init();

        return 0;
}
late_initcall(init_error_injection);