hw_breakpoint.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) 2009 IBM Corporation
 * Copyright (C) 2009 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */

#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/user.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>

/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
EXPORT_PER_CPU_SYMBOL(cpu_dr7);

/* Per cpu debug address register values */
static DEFINE_PER_CPU(unsigned long, cpu_debugreg[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

static inline unsigned long
__encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	unsigned long bp_info;

	bp_info = (len | type) & 0xf;
	bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE);
	bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE));

	return bp_info;
}

/*
 * Encode the length, type, Exact, and Enable bits for a particular breakpoint
 * as stored in debug register 7.
 */
unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type)
{
	return __encode_dr7(drnum, len, type) | DR_GLOBAL_SLOWDOWN;
}

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
 */
int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;

	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
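
/*
 * Worked example (illustrative only, using the DR7 layout from
 * <asm/debugreg.h> and the encodings from <asm/hw_breakpoint.h>):
 * a 4-byte write breakpoint in slot 1 yields
 *
 *	encode_dr7(1, X86_BREAKPOINT_LEN_4, X86_BREAKPOINT_WRITE)
 *		== (((0x4c | 0x81) & 0xf) << (16 + 1 * 4))	// LEN1=0b11, R/W1=0b01
 *		 | (DR_GLOBAL_ENABLE << (1 * 2))		// G1, bit 3
 *		 | DR_GLOBAL_SLOWDOWN				// GE, bit 9
 *		== 0x00d00208
 *
 * and decode_dr7() recovers X86_BREAKPOINT_LEN_4 / X86_BREAKPOINT_WRITE by
 * masking the same nibble and OR-ing back the 0x40/0x80 tag bits.
 */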

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint. Eventually we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long *dr7;
	int i;

	lockdep_assert_irqs_disabled();

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	set_debugreg(info->address, i);
	__this_cpu_write(cpu_debugreg[i], info->address);

	dr7 = this_cpu_ptr(&cpu_dr7);
	*dr7 |= encode_dr7(i, info->len, info->type);

	/*
	 * Ensure we first write cpu_dr7 before we set the DR7 register.
	 * This ensures an NMI never sees cpu_dr7 as 0 while DR7 is not.
	 */
	barrier();

	set_debugreg(*dr7, 7);

	if (info->mask)
		set_dr_addr_mask(info->mask, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long dr7;
	int i;

	lockdep_assert_irqs_disabled();

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	dr7 = this_cpu_read(cpu_dr7);
	dr7 &= ~__encode_dr7(i, info->len, info->type);

	set_debugreg(dr7, 7);
	if (info->mask)
		set_dr_addr_mask(0, i);

	/*
	 * Ensure the write to cpu_dr7 is after we've set the DR7 register.
	 * This ensures an NMI never sees cpu_dr7 as 0 while DR7 is not.
	 */
	barrier();

	this_cpu_write(cpu_dr7, dr7);
}
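
/*
 * Note on the ordering in the two functions above: cpu_dr7 and DR7 are
 * strictly CPU-local, and the only concurrent reader is an NMI on the
 * same CPU, which observes program order.  A compiler barrier() is
 * therefore sufficient to keep the shadow copy and the hardware register
 * consistent; no smp_wmb()-style memory barrier is needed.
 */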

static int arch_bp_generic_len(int x86_len)
{
	switch (x86_len) {
	case X86_BREAKPOINT_LEN_1:
		return HW_BREAKPOINT_LEN_1;
	case X86_BREAKPOINT_LEN_2:
		return HW_BREAKPOINT_LEN_2;
	case X86_BREAKPOINT_LEN_4:
		return HW_BREAKPOINT_LEN_4;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		return HW_BREAKPOINT_LEN_8;
#endif
	default:
		return -EINVAL;
	}
}

int arch_bp_generic_fields(int x86_len, int x86_type,
			   int *gen_len, int *gen_type)
{
	int len;

	/* Type */
	switch (x86_type) {
	case X86_BREAKPOINT_EXECUTE:
		if (x86_len != X86_BREAKPOINT_LEN_X)
			return -EINVAL;

		*gen_type = HW_BREAKPOINT_X;
		*gen_len = sizeof(long);
		return 0;
	case X86_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case X86_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	len = arch_bp_generic_len(x86_len);
	if (len < 0)
		return -EINVAL;
	*gen_len = len;

	return 0;
}
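
/*
 * For instance (illustrative): an installed X86_BREAKPOINT_WRITE /
 * X86_BREAKPOINT_LEN_4 slot converts back to the generic pair
 * HW_BREAKPOINT_W / HW_BREAKPOINT_LEN_4, while an execute slot always
 * reports HW_BREAKPOINT_X with a length of sizeof(long).
 */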

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned long va;
	int len;

	va = hw->address;
	len = arch_bp_generic_len(hw->len);
	WARN_ON_ONCE(len < 0);

	/*
	 * We don't need to worry about va + len - 1 overflowing:
	 * we already require that va is aligned to a multiple of len.
	 */
	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}
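
/*
 * Example (illustrative): a 4-byte breakpoint whose last byte sits at
 * TASK_SIZE_MAX - 1 returns false here (a pure user-space range), while
 * any range whose last byte reaches TASK_SIZE_MAX or beyond is treated
 * as a kernel-space breakpoint.
 */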

/*
 * Checks whether the closed range [addr, end] overlaps the half-open
 * area [base, base + size).
 */
static inline bool within_area(unsigned long addr, unsigned long end,
			       unsigned long base, unsigned long size)
{
	return end >= base && addr < (base + size);
}
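
/*
 * Note the asymmetry between the two intervals: for example,
 * within_area(0x1000, 0x1fff, 0x2000, 0x1000) is false, but
 * within_area(0x1000, 0x2000, 0x2000, 0x1000) is true because the
 * inclusive end 0x2000 touches the first byte of the area.
 */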

/*
 * Checks whether the range from addr to end, inclusive, overlaps the fixed
 * mapped CPU entry area range or other ranges used for CPU entry.
 */
static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
{
	int cpu;

	/* CPU entry area is always used for CPU entry */
	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
			CPU_ENTRY_AREA_TOTAL_SIZE))
		return true;

	/*
	 * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
	 * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
	 */
#ifdef CONFIG_SMP
	if (within_area(addr, end, (unsigned long)__per_cpu_offset,
			sizeof(unsigned long) * nr_cpu_ids))
		return true;
#else
	if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
			sizeof(pcpu_unit_offsets)))
		return true;
#endif

	for_each_possible_cpu(cpu) {
		/* The original rw GDT is being used after load_direct_gdt() */
		if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
				GDT_SIZE))
			return true;

		/*
		 * cpu_tss_rw is not directly referenced by hardware, but
		 * it is also used in the CPU entry code.
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tss_rw, cpu),
				sizeof(struct tss_struct)))
			return true;

		/*
		 * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry;
		 * a data breakpoint on it would cause an unwanted #DB.
		 * Protect the full cpu_tlbstate structure to be sure.
		 */
		if (within_area(addr, end,
				(unsigned long)&per_cpu(cpu_tlbstate, cpu),
				sizeof(struct tlb_state)))
			return true;

		/*
		 * When running in a guest (X86_FEATURE_HYPERVISOR),
		 * local_db_save() reads the per-CPU cpu_dr7 before clearing
		 * the DR7 register.
		 */
		if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
				sizeof(cpu_dr7)))
			return true;
	}

	return false;
}
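
/*
 * Net effect (see arch_build_bp_info() below): a perf_event_open()
 * request whose [bp_addr, bp_addr + bp_len) range touches any of these
 * objects, e.g. a watchpoint on a CPU's GDT, fails with -EINVAL before
 * any debug register is ever programmed.
 */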

static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	unsigned long bp_end;

	bp_end = attr->bp_addr + attr->bp_len - 1;
	if (bp_end < attr->bp_addr)
		return -EINVAL;

	/*
	 * Prevent any breakpoint of any type that overlaps the CPU
	 * entry area and data.  This protects the IST stacks and also
	 * reduces the chance that we ever find out what happens if
	 * there's a data breakpoint on the GDT, IDT, or TSS.
	 */
	if (within_cpu_entry(attr->bp_addr, bp_end))
		return -EINVAL;

	hw->address = attr->bp_addr;
	hw->mask = 0;

	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_W:
		hw->type = X86_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		hw->type = X86_BREAKPOINT_RW;
		break;
	case HW_BREAKPOINT_X:
		/*
		 * We don't allow kernel breakpoints in places that are not
		 * acceptable for kprobes.  On non-kprobes kernels, we don't
		 * allow kernel breakpoints at all.
		 */
		if (attr->bp_addr >= TASK_SIZE_MAX) {
			if (within_kprobe_blacklist(attr->bp_addr))
				return -EINVAL;
		}
		hw->type = X86_BREAKPOINT_EXECUTE;
		/*
		 * x86 instruction breakpoints use a fixed pseudo length
		 * (LEN_X).  But we still need to check that userspace is not
		 * trying to set up an unsupported length, for example to get
		 * a range breakpoint.
		 */
		if (attr->bp_len == sizeof(long)) {
			hw->len = X86_BREAKPOINT_LEN_X;
			return 0;
		}
		fallthrough;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->len = X86_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->len = X86_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case HW_BREAKPOINT_LEN_8:
		hw->len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		/* AMD range breakpoint */
		if (!is_power_of_2(attr->bp_len))
			return -EINVAL;
		if (attr->bp_addr & (attr->bp_len - 1))
			return -EINVAL;
		if (!boot_cpu_has(X86_FEATURE_BPEXT))
			return -EOPNOTSUPP;

		/*
		 * It's impossible to use a range breakpoint to fake out
		 * user vs kernel detection because bp_len - 1 can't
		 * have the high bit set.  If we ever allow range instruction
		 * breakpoints, then we'll have to check for kprobe-blacklisted
		 * addresses anywhere in the range.
		 */
		hw->mask = attr->bp_len - 1;
		hw->len = X86_BREAKPOINT_LEN_1;
	}

	return 0;
}
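
/*
 * Example of the AMD range case above (illustrative, requires
 * X86_FEATURE_BPEXT): a naturally aligned bp_addr with bp_len == 32
 * falls through to the default label and produces hw->mask == 0x1f
 * with hw->len == X86_BREAKPOINT_LEN_1, i.e. a 1-byte breakpoint whose
 * low five address bits are ignored by the hardware address mask.
 */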

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	switch (hw->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		if (hw->mask)
			align = hw->mask;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (hw->address & align)
		return -EINVAL;

	return 0;
}
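
/*
 * For example (illustrative): a HW_BREAKPOINT_LEN_4 watchpoint at
 * address 0x1003 is rejected here (0x1003 & 3 != 0), while the same
 * watchpoint at 0x1004 passes the alignment check.
 */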

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}

	t->virtual_dr6 = 0;
	t->ptrace_dr7 = 0;
}

void hw_breakpoint_restore(void)
{
	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
	set_debugreg(DR6_RESERVED, 6);
	set_debugreg(__this_cpu_read(cpu_dr7), 7);
}
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
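
/*
 * hw_breakpoint_restore() reloads the debug registers from the per-CPU
 * shadow copies after something else has clobbered them; it is exported
 * for callers outside this file, for example KVM's guest-exit handling
 * (hence the EXPORT_SYMBOL_GPL above).
 */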

/*
 * Handle debug exception notifications.
 *
 * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below.
 *
 * NOTIFY_DONE is returned if one of the following conditions is true:
 * i) When the causative address is from user-space and the exception
 *    is a valid one, i.e. not triggered as a result of lazy debug register
 *    switching
 * ii) When there are more bits than trap<n> set in the DR6 register (such
 *    as BD, BS or BT), indicating that more than one debug condition is
 *    met and requires some more action in do_debug().
 *
 * NOTIFY_STOP is returned for all other cases.
 */
static int hw_breakpoint_handler(struct die_args *args)
{
	int i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long *dr6_p;
	unsigned long dr6;
	bool bpx;

	/* The DR6 value is pointed to by args->err */
	dr6_p = (unsigned long *)ERR_PTR(args->err);
	dr6 = *dr6_p;

	/* Do an early return if no trap bits are set in DR6 */
	if ((dr6 & DR_TRAP_BITS) == 0)
		return NOTIFY_DONE;

	/* Handle all the breakpoints that were triggered */
	for (i = 0; i < HBP_NUM; ++i) {
		if (likely(!(dr6 & (DR_TRAP0 << i))))
			continue;

		bp = this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;

		/*
		 * TF and data breakpoints are traps and can be merged, while
		 * instruction breakpoints are faults and will be raised
		 * separately.
		 *
		 * However DR6 can indicate both TF and instruction
		 * breakpoints.  In that case take TF as that has precedence
		 * and delay the instruction breakpoint for the next exception.
		 */
		if (bpx && (dr6 & DR_STEP))
			continue;

		/*
		 * Reset the 'i'th TRAP bit in dr6 to denote completion of
		 * exception handling.
		 */
		(*dr6_p) &= ~(DR_TRAP0 << i);

		perf_bp_event(bp, args->regs);

		/*
		 * Set up the resume flag to avoid breakpoint recursion when
		 * returning to the origin.
		 */
		if (bpx)
			args->regs->flags |= X86_EFLAGS_RF;
	}

	/*
	 * Further processing in do_debug() is needed for a) user-space
	 * breakpoints (to generate signals) and b) when the system has
	 * taken an exception due to multiple causes.
	 */
	if ((current->thread.virtual_dr6 & DR_TRAP_BITS) ||
	    (dr6 & (~DR_TRAP_BITS)))
		rc = NOTIFY_DONE;

	return rc;
}
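
/*
 * Illustration (not exhaustive): a DR6 value of 0x0002 means only trap
 * bit 1 fired, so the loop above consults bp_per_reg[1] alone and, once
 * the event is delivered, clears DR_TRAP1 so do_debug() sees nothing
 * further.  A DR6 with DR_STEP (0x4000) additionally set falls outside
 * DR_TRAP_BITS and forces NOTIFY_DONE, so the single-step logic in
 * do_debug() still runs.
 */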

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	if (val != DIE_DEBUG)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
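
/*
 * The machinery above is ultimately driven through the perf syscall.
 * A minimal user-space sketch (illustrative only; error handling is
 * trimmed and the watched variable is an arbitrary example) that requests
 * a 4-byte write watchpoint, i.e. the HW_BREAKPOINT_W path through
 * arch_build_bp_info():
 *
 *	#include <linux/perf_event.h>
 *	#include <linux/hw_breakpoint.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static int watched;				// variable to watch
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type = PERF_TYPE_BREAKPOINT;
 *		attr.size = sizeof(attr);
 *		attr.bp_type = HW_BREAKPOINT_W;
 *		attr.bp_addr = (uintptr_t)&watched;
 *		attr.bp_len = HW_BREAKPOINT_LEN_4;
 *		attr.sample_period = 1;
 *
 *		// pid 0 (self), any cpu, no group, no flags
 *		int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *		watched = 42;				// triggers the watchpoint
 *		close(fd);
 *		return fd < 0;
 *	}
 */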