trace_probe_tmpl.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */

static nokprobe_inline void
fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
{
        switch (code->size) {
        case 1:
                *(u8 *)buf = (u8)val;
                break;
        case 2:
                *(u16 *)buf = (u16)val;
                break;
        case 4:
                *(u32 *)buf = (u32)val;
                break;
        case 8:
                //TBD: 32bit signed
                *(u64 *)buf = (u64)val;
                break;
        default:
                *(unsigned long *)buf = val;
        }
}
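
/*
 * Example (illustrative only): with code->size == 2, only the low 16
 * bits of the fetched value survive the store. The designated
 * initializer below assumes the 'op' and 'size' member names used
 * elsewhere in this file; the full struct fetch_insn layout lives in
 * trace_probe.h.
 *
 *	struct fetch_insn insn = { .op = FETCH_OP_ST_RAW, .size = 2 };
 *	u16 out;
 *
 *	fetch_store_raw(0x12345678UL, &insn, &out);	// out == 0x5678
 */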

static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
        switch (code->basesize) {
        case 1:
                *(u8 *)buf <<= code->lshift;
                *(u8 *)buf >>= code->rshift;
                break;
        case 2:
                *(u16 *)buf <<= code->lshift;
                *(u16 *)buf >>= code->rshift;
                break;
        case 4:
                *(u32 *)buf <<= code->lshift;
                *(u32 *)buf >>= code->rshift;
                break;
        case 8:
                *(u64 *)buf <<= code->lshift;
                *(u64 *)buf >>= code->rshift;
                break;
        }
}
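
/*
 * Example (illustrative only): to extract a 3-bit field at bit offset
 * 4 of a 32-bit base, the argument parser would encode (assuming the
 * "shift out the high bits, then the low bits" scheme this helper
 * implements):
 *
 *	lshift = 32 - (4 + 3) = 25;	// discard the 25 bits above the field
 *	rshift = 32 - 3 = 29;		// discard the 4 bits below it
 *
 * so for *(u32 *)buf == 0x000000f0 the stored result becomes 0x7.
 */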

/*
 * These functions must be defined for each callsite.
 * Return the consumed dynamic data size (>= 0), or an error (< 0).
 * If dest is NULL, don't store the result and just return the required
 * dynamic data size.
 */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs,
                   void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
static nokprobe_inline int fetch_store_strlen_user(unsigned long addr);
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base);
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size);
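
/*
 * Example (illustrative only): a kernel-probe callsite might back the
 * two memory accessors with the nofault copy helpers, roughly as the
 * kprobe event code does. This is a sketch of the contract, not the
 * canonical definition:
 *
 *	static nokprobe_inline int
 *	probe_mem_read(void *dest, void *src, size_t size)
 *	{
 *		return copy_from_kernel_nofault(dest, src, size);
 *	}
 *
 *	static nokprobe_inline int
 *	probe_mem_read_user(void *dest, void *src, size_t size)
 *	{
 *		return copy_from_user_nofault(dest,
 *				(__force const void __user *)src, size);
 *	}
 */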

/* From the 2nd stage onward, the routine is the same for every callsite */
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
                          void *dest, void *base)
{
        struct fetch_insn *s3 = NULL;   /* 3rd (store) stage instruction */
        int total = 0, ret = 0, i = 0;
        u32 loc = 0;
        unsigned long lval = val;

stage2:
        /* 2nd stage: dereference memory if needed */
        do {
                if (code->op == FETCH_OP_DEREF) {
                        lval = val;
                        ret = probe_mem_read(&val, (void *)val + code->offset,
                                             sizeof(val));
                } else if (code->op == FETCH_OP_UDEREF) {
                        lval = val;
                        ret = probe_mem_read_user(&val,
                                (void *)val + code->offset, sizeof(val));
                } else
                        break;
                if (ret)
                        return ret;
                code++;
        } while (1);

        s3 = code;
stage3:
        /* 3rd stage: store value to buffer */
        if (unlikely(!dest)) {
                if (code->op == FETCH_OP_ST_STRING) {
                        ret = fetch_store_strlen(val + code->offset);
                        code++;
                        goto array;
                } else if (code->op == FETCH_OP_ST_USTRING) {
                        ret = fetch_store_strlen_user(val + code->offset);
                        code++;
                        goto array;
                } else
                        return -EILSEQ;
        }

        switch (code->op) {
        case FETCH_OP_ST_RAW:
                fetch_store_raw(val, code, dest);
                break;
        case FETCH_OP_ST_MEM:
                probe_mem_read(dest, (void *)val + code->offset, code->size);
                break;
        case FETCH_OP_ST_UMEM:
                probe_mem_read_user(dest, (void *)val + code->offset, code->size);
                break;
        case FETCH_OP_ST_STRING:
                loc = *(u32 *)dest;
                ret = fetch_store_string(val + code->offset, dest, base);
                break;
        case FETCH_OP_ST_USTRING:
                loc = *(u32 *)dest;
                ret = fetch_store_string_user(val + code->offset, dest, base);
                break;
        default:
                return -EILSEQ;
        }
        code++;

        /* 4th stage: modify stored value if needed */
        if (code->op == FETCH_OP_MOD_BF) {
                fetch_apply_bitfield(code, dest);
                code++;
        }

array:
        /* Last stage: loop over the array elements */
        if (code->op == FETCH_OP_LP_ARRAY) {
                total += ret;
                if (++i < code->param) {
                        code = s3;
                        if (s3->op != FETCH_OP_ST_STRING &&
                            s3->op != FETCH_OP_ST_USTRING) {
                                dest += s3->size;
                                val += s3->size;
                                goto stage3;
                        }
                        /* Strings: redo the last dereference for the next pointer */
                        code--;
                        val = lval + sizeof(char *);
                        if (dest) {
                                dest += sizeof(u32);
                                *(u32 *)dest = update_data_loc(loc, ret);
                        }
                        goto stage2;
                }
                code++;
                ret = total;
        }

        return code->op == FETCH_OP_END ? ret : -EILSEQ;
}
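
/*
 * Example (illustrative only): a two-level fetch argument such as
 * "+8(+0(%di)):u32" might be compiled by the parser into a program
 * like the one below (op names from trace_probe.h; the exact encoding
 * is the parser's business, so treat this as a sketch):
 *
 *	{ .op = FETCH_OP_REG,    .param  = <offset of di in pt_regs> }
 *	{ .op = FETCH_OP_DEREF,  .offset = 0 }	// stage 2: val = *(val + 0)
 *	{ .op = FETCH_OP_ST_MEM, .offset = 8, .size = 4 }	// stage 3
 *	{ .op = FETCH_OP_END }
 *
 * The callsite's process_fetch_insn() resolves the first instruction
 * to a register value and hands the rest to process_fetch_insn_bottom().
 */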

/* Sum up the total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
        struct probe_arg *arg;
        int i, len, ret = 0;

        for (i = 0; i < tp->nr_args; i++) {
                arg = tp->args + i;
                if (unlikely(arg->dynamic)) {
                        len = process_fetch_insn(arg->code, regs, NULL, NULL);
                        if (len > 0)
                                ret += len;
                }
        }

        return ret;
}

/* Store the value of each argument */
static nokprobe_inline void
store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
                 int header_size, int maxlen)
{
        struct probe_arg *arg;
        void *base = data - header_size;
        void *dyndata = data + tp->size;
        u32 *dl;        /* Data location */
        int ret, i;

        for (i = 0; i < tp->nr_args; i++) {
                arg = tp->args + i;
                dl = data + arg->offset;
                /* Point to the dynamic data area if needed */
                if (unlikely(arg->dynamic))
                        *dl = make_data_loc(maxlen, dyndata - base);
                ret = process_fetch_insn(arg->code, regs, dl, base);
                if (unlikely(ret < 0 && arg->dynamic)) {
                        *dl = make_data_loc(0, dyndata - base);
                } else {
                        dyndata += ret;
                        maxlen -= ret;
                }
        }
}
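
/*
 * Example (illustrative only): the u32 "data location" slot uses the
 * usual __data_loc encoding, length in the high 16 bits and offset
 * from 'base' in the low 16 bits (assuming the make_data_loc() and
 * update_data_loc() helpers in trace_probe.h keep that layout):
 *
 *	*dl = make_data_loc(7, 0x28);	// *dl == 0x00070028:
 *					// 7 bytes at base + 0x28
 */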

static inline int
print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
                 u8 *data, void *field)
{
        void *p;
        int i, j;

        for (i = 0; i < nr_args; i++) {
                struct probe_arg *a = args + i;

                trace_seq_printf(s, " %s=", a->name);
                if (likely(!a->count)) {
                        if (!a->type->print(s, data + a->offset, field))
                                return -ENOMEM;
                        continue;
                }
                trace_seq_putc(s, '{');
                p = data + a->offset;
                for (j = 0; j < a->count; j++) {
                        if (!a->type->print(s, p, field))
                                return -ENOMEM;
                        trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
                        p += a->type->size;
                }
        }
        return 0;
}
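
/*
 * Example (illustrative only): for an argument "x" with no count and
 * an argument "y" with count == 3, the loop above appends output of
 * the form:
 *
 *	 x=1 y={4,5,6}
 */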