hw_breakpoint_constraints.c

// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/hw_breakpoint.h>
#include <asm/sstep.h>
#include <asm/cache.h>
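
/*
 * True if the faulting data address (DAR) lies within the
 * user-requested watch range [address, address + len).
 */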
static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
        return ((info->address <= dar) && (dar - info->address < info->len));
}
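
/*
 * True if the access [ea, ea + size) overlaps the user-requested
 * watch range.
 */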
static bool ea_user_range_overlaps(unsigned long ea, int size,
                                   struct arch_hw_breakpoint *info)
{
        return ((ea < info->address + info->len) &&
                (ea + size > info->address));
}
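
/*
 * True if the DAR lies within the range the hardware actually covers,
 * i.e. the user-requested range rounded out to HW_BREAKPOINT_SIZE
 * boundaries.
 */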
static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
        unsigned long hw_start_addr, hw_end_addr;

        hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
        hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

        return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}
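
/*
 * True if the access [ea, ea + size) overlaps the hardware-covered
 * range.
 */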
static bool ea_hw_range_overlaps(unsigned long ea, int size,
                                 struct arch_hw_breakpoint *info)
{
        unsigned long hw_start_addr, hw_end_addr;
        unsigned long align_size = HW_BREAKPOINT_SIZE;

        /*
         * On p10 predecessors, quadword accesses are handled differently
         * than other instructions.
         */
        if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
                align_size = HW_BREAKPOINT_SIZE_QUADWORD;

        hw_start_addr = ALIGN_DOWN(info->address, align_size);
        hw_end_addr = ALIGN(info->address + info->len, align_size);

        return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but privilege level matches, consider it as
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
                                    struct arch_hw_breakpoint *info)
{
        if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
                return false;

        /*
         * The Cache Management instructions other than dcbz never
         * cause a match. i.e. if type is CACHEOP, the instruction
         * is dcbz, and dcbz is treated as Store.
         */
        if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
                return false;

        if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
                return false;

        if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
                return false;

        return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 */
bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
                          unsigned long ea, int type, int size,
                          struct arch_hw_breakpoint *info)
{
        bool in_user_range = dar_in_user_range(regs->dar, info);
        bool dawrx_constraints;

        /*
         * 8xx supports only one breakpoint and thus we can
         * unconditionally return true.
         */
        if (IS_ENABLED(CONFIG_PPC_8xx)) {
                if (!in_user_range)
                        info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
                return true;
        }

        if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
                if (cpu_has_feature(CPU_FTR_ARCH_31) &&
                    !dar_in_hw_range(regs->dar, info))
                        return false;

                return true;
        }

        dawrx_constraints = check_dawrx_constraints(regs, type, info);

        if (type == UNKNOWN) {
                if (cpu_has_feature(CPU_FTR_ARCH_31) &&
                    !dar_in_hw_range(regs->dar, info))
                        return false;

                return dawrx_constraints;
        }

        if (ea_user_range_overlaps(ea, size, info))
                return dawrx_constraints;

        if (ea_hw_range_overlaps(ea, size, info)) {
                if (dawrx_constraints) {
                        info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
                        return true;
                }
        }

        return false;
}
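
/*
 * Size of the region affected by a cache management instruction,
 * i.e. the L1 data cache block size.
 */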
static int cache_op_size(void)
{
#ifdef __powerpc64__
        return ppc64_caches.l1d.block_size;
#else
        return L1_CACHE_BYTES;
#endif
}
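
/*
 * Decode the instruction at regs->nip and report the access type, size
 * and effective address of the memory access that hit the watchpoint.
 */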
void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
                         int *type, int *size, unsigned long *ea)
{
        struct instruction_op op;

        if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
                return;

        analyse_instr(&op, regs, *instr);
        *type = GETTYPE(op.type);
        *ea = op.ea;
#ifdef __powerpc64__
        if (!(regs->msr & MSR_64BIT))
                *ea &= 0xffffffffUL;
#endif

        *size = GETSIZE(op.type);
        if (*type == CACHEOP) {
                *size = cache_op_size();
                *ea &= ~(*size - 1);
        } else if (*type == LOAD_VMX || *type == STORE_VMX) {
                *ea &= ~(*size - 1);
        }
}