hw_breakpoint.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/bug.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */
struct bp_cpuinfo {
        /* Number of pinned cpu breakpoints in a cpu */
        unsigned int    cpu_pinned;
        /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
        unsigned int    *tsk_pinned;
        /* Number of non-pinned cpu/task breakpoints in a cpu */
        unsigned int    flexible; /* XXX: placeholder, see fetch_this_slot() */
};
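
/*
 * Example of the tsk_pinned[] encoding: with two tasks each holding one
 * pinned breakpoint of a given type on this CPU, tsk_pinned[0] == 2; if
 * one of those tasks then takes a second breakpoint, it moves to the next
 * bucket and we get tsk_pinned[0] == 1 and tsk_pinned[1] == 1.
 */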

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
        return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}
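
/*
 * Breakpoints are accounted in two separate pools: TYPE_DATA for
 * read/write (data) breakpoints and TYPE_INST for execute (instruction)
 * breakpoints.
 */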
static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
        if (bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int i;

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
        struct task_struct *tsk = bp->hw.target;
        struct perf_event *iter;
        int count = 0;

        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->hw.target == tsk &&
                    find_slot_idx(iter->attr.bp_type) == type &&
                    (iter->cpu < 0 || cpu == iter->cpu))
                        count += hw_breakpoint_weight(iter);
        }

        return count;
}
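
/*
 * A breakpoint bound to a single CPU only constrains that CPU, while a
 * task-bound breakpoint (bp->cpu < 0) may follow its task anywhere, so
 * every possible CPU has to be accounted for.
 */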
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
        if (bp->cpu >= 0)
                return cpumask_of(bp->cpu);
        return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        for_each_cpu(cpu, cpumask) {
                struct bp_cpuinfo *info = get_bp_info(cpu, type);
                int nr;

                nr = info->cpu_pinned;
                if (!bp->hw.target)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(cpu, bp, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = info->flexible;
                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
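/*
 * The task moves between tsk_pinned[] buckets: going from one to two
 * pinned breakpoints (weight 1), for instance, means tsk_pinned[0]--
 * and tsk_pinned[1]++.
 */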
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int old_idx, new_idx;

        old_idx = task_bp_pinned(cpu, bp, type) - 1;
        new_idx = old_idx + weight;

        if (old_idx >= 0)
                tsk_pinned[old_idx]--;
        if (new_idx >= 0)
                tsk_pinned[new_idx]++;
}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        if (!enable)
                weight = -weight;

        /* Pinned counter cpu profiling */
        if (!bp->hw.target) {
                get_bp_info(bp->cpu, type)->cpu_pinned += weight;
                return;
        }

        /* Pinned counter task profiling */
        for_each_cpu(cpu, cpumask)
                toggle_bp_task_slot(bp, cpu, type, weight);

        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);
        else
                list_del(&bp->hw.bp_list);
}

__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
        return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        /*
         * A weak stub function here for those archs that don't define
         * it inside arch/.../kernel/hw_breakpoint.c
         */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must keep
 *          one register at least (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
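/*
 * Illustration (the numbers are only an example, assuming nr_slots[TYPE_DATA]
 * is 4 as on x86): if a CPU already holds one cpu-pinned data breakpoint and
 * its busiest task holds two pinned data breakpoints, slots.pinned is 3; a
 * new pinned breakpoint of weight 1 still fits (4 <= 4), but the next one
 * would need a fifth slot and __reserve_bp_slot() returns -ENOSPC.
 */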
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;
        int ret;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp_type == HW_BREAKPOINT_EMPTY ||
            bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp_type);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        /*
         * Simulate the addition of this breakpoint to the constraints
         * and see the result.
         */
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        ret = arch_reserve_bp_slot(bp);
        if (ret)
                return ret;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp, bp->attr.bp_type);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
        enum bp_type_idx type;
        int weight;

        arch_release_bp_slot(bp);

        type = find_slot_idx(bp_type);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        arch_unregister_hw_breakpoint(bp);
        __release_bp_slot(bp, bp->attr.bp_type);

        mutex_unlock(&nr_bp_mutex);
}
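
/*
 * Move the slot accounting of @bp from @old_type to @new_type, putting the
 * old reservation back if there is no room for the new one.
 */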
static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
        int err;

        __release_bp_slot(bp, old_type);
        err = __reserve_bp_slot(bp, new_type);
        if (err) {
                /*
                 * Reserve the old_type slot back in case
                 * there's no space for the new type.
                 *
                 * This must succeed, because we just released
                 * the old_type slot in the __release_bp_slot
                 * call above. If not, something is broken.
                 */
                WARN_ON(__reserve_bp_slot(bp, old_type));
        }

        return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
        int ret;

        mutex_lock(&nr_bp_mutex);
        ret = __modify_bp_slot(bp, old_type, new_type);
        mutex_unlock(&nr_bp_mutex);
        return ret;
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp, bp->attr.bp_type);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp, bp->attr.bp_type);

        return 0;
}
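
/*
 * Validate the attributes with the arch code and refuse kernel-space
 * addresses when the event excludes the kernel or the caller lacks
 * CAP_SYS_ADMIN.
 */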
static int hw_breakpoint_parse(struct perf_event *bp,
                               const struct perf_event_attr *attr,
                               struct arch_hw_breakpoint *hw)
{
        int err;

        err = hw_breakpoint_arch_parse(bp, attr, hw);
        if (err)
                return err;

        if (arch_check_bp_in_kernelspace(hw)) {
                if (attr->exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint hw = { };
        int err;

        err = reserve_bp_slot(bp);
        if (err)
                return err;

        err = hw_breakpoint_parse(bp, &bp->attr, &hw);
        if (err) {
                release_bp_slot(bp);
                return err;
        }

        bp->hw.info = hw;

        return 0;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data for the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
                                                context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
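
/*
 * A minimal usage sketch (the handler name and variables below are only
 * illustrative, not part of this file):
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = addr;
 *      attr.bp_len = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *      bp = register_user_hw_breakpoint(&attr, sample_hbp_handler, NULL, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 *
 * The breakpoint is later torn down with unregister_hw_breakpoint(bp).
 */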

static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
                                    struct perf_event_attr *from)
{
        to->bp_addr = from->bp_addr;
        to->bp_type = from->bp_type;
        to->bp_len = from->bp_len;
        to->disabled = from->disabled;
}
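
/*
 * When @check is set, only the fields copied by hw_breakpoint_copy_attr()
 * (address, type, length and the disabled bit) may differ from the
 * breakpoint's current attributes; any other change is rejected.
 */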
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
                                bool check)
{
        struct arch_hw_breakpoint hw = { };
        int err;

        err = hw_breakpoint_parse(bp, attr, &hw);
        if (err)
                return err;

        if (check) {
                struct perf_event_attr old_attr;

                old_attr = bp->attr;
                hw_breakpoint_copy_attr(&old_attr, attr);
                if (memcmp(&old_attr, attr, sizeof(*attr)))
                        return -EINVAL;
        }

        if (bp->attr.bp_type != attr->bp_type) {
                err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
                if (err)
                        return err;
        }

        hw_breakpoint_copy_attr(&bp->attr, attr);
        bp->hw.info = hw;

        return 0;
}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        int err;

        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
         * will not be possible to raise IPIs that invoke __perf_event_disable.
         * So call the function directly after making sure we are targeting the
         * current task.
         */
        if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
                perf_event_disable_local(bp);
        else
                perf_event_disable(bp);

        err = modify_user_hw_breakpoint_check(bp, attr, false);

        if (!bp->attr.disabled)
                perf_event_enable(bp);

        return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data for the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context)
{
        struct perf_event * __percpu *cpu_events, *bp;
        long err = 0;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);
                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        break;
                }

                per_cpu(*cpu_events, cpu) = bp;
        }
        put_online_cpus();

        if (likely(!err))
                return cpu_events;

        unregister_wide_hw_breakpoint(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
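
/*
 * A minimal usage sketch (the watched symbol and handler name are only
 * illustrative):
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wide_bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = (unsigned long)&watched_variable;
 *      attr.bp_len = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *      wide_bp = register_wide_hw_breakpoint(&attr, wide_handler, NULL);
 *
 * On success one event per online CPU is armed; the whole set is released
 * with unregister_wide_hw_breakpoint(wide_bp).
 */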

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;

        for_each_possible_cpu(cpu)
                unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
        release_bp_slot(event);
}
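
/*
 * PMU ->event_init callback: claim only PERF_TYPE_BREAKPOINT events,
 * reserve a debug-register slot for them and arrange for the slot to be
 * released when the event is destroyed.
 */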
static int hw_breakpoint_event_init(struct perf_event *bp)
{
        int err;

        if (bp->attr.type != PERF_TYPE_BREAKPOINT)
                return -ENOENT;

        /*
         * no branch sampling for breakpoint events
         */
        if (has_branch_stack(bp))
                return -EOPNOTSUPP;

        err = register_perf_hw_breakpoint(bp);
        if (err)
                return err;

        bp->destroy = bp_perf_event_destroy;

        return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
        if (!(flags & PERF_EF_START))
                bp->hw.state = PERF_HES_STOPPED;

        if (is_sampling_event(bp)) {
                bp->hw.last_period = bp->hw.sample_period;
                perf_swevent_set_period(bp);
        }

        return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
        arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
        bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
        bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */

        .event_init     = hw_breakpoint_event_init,
        .add            = hw_breakpoint_add,
        .del            = hw_breakpoint_del,
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
};
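
/*
 * Boot-time setup: allocate the per-CPU constraint tables, register the
 * breakpoint PMU and hook hw_breakpoint_exceptions_notify into the die
 * notifier chain.
 */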
int __init init_hw_breakpoint(void)
{
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        struct bp_cpuinfo *info = get_bp_info(cpu, i);

                        info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
                                                   GFP_KERNEL);
                        if (!info->tsk_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(get_bp_info(err_cpu, i)->tsk_pinned);
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}