stackmap.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};
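
/* irq_work callback: releases the mmap lock taken (non-owner) by
 * stack_map_get_build_id_offset() when that lookup ran with IRQs
 * disabled and could not unlock directly.
 */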
static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}
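
/* Pre-allocate all buckets at map creation time and seed the per-cpu
 * freelist with them, so stack traces can be stored from contexts that
 * must not allocate memory (e.g. NMI).
 */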
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u64 elem_size = sizeof(struct stack_map_bucket) +
			(u64)smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	struct bpf_map_memory mem;
	u64 cost, n_buckets;
	int err;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);
	if (!n_buckets)
		return ERR_PTR(-E2BIG);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_charge;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	bpf_map_charge_move(&smap->map.memory, &mem);

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_charge:
	bpf_map_charge_finish(&mem);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}
#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
					   unsigned char *build_id,
					   void *note_start,
					   Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only supports note that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs) /* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i) {
		if (phdr[i].p_type == PT_NOTE &&
		    !stack_map_parse_build_id(page_addr, build_id,
					      page_addr + phdr[i].p_offset,
					      phdr[i].p_filesz))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i) {
		if (phdr[i].p_type == PT_NOTE &&
		    !stack_map_parse_build_id(page_addr, build_id,
					      page_addr + phdr[i].p_offset,
					      phdr[i].p_filesz))
			return 0;
	}
	return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable file and shared object file */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&up_read_work);
			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
				/* cannot queue more up_read, fallback */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow to trylock mmap sem in
			 * interrupt disabled context. Force the fallback code.
			 */
			irq_work_busy = true;
		}
	}

	/*
	 * We cannot do up_read() when the irq is disabled, because of
	 * risk to deadlock with rq_lock. To do build_id lookup when the
	 * irqs are disabled, we need to run up_read() in irq_work. We use
	 * a percpu variable to do the irq_work. If the irq_work is
	 * already used by another lookup, we fall back to report ips.
	 *
	 * Same fallback is used for kernel stack (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    !mmap_read_trylock_non_owner(current->mm)) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		mmap_read_unlock_non_owner(current->mm);
	} else {
		work->mm = current->mm;
		irq_work_queue(&work->irq_work);
	}
}
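
/* Capture a stack trace for an arbitrary task via stack_trace_save_tsk(),
 * reusing the perf callchain buffers. Returns NULL if CONFIG_STACKTRACE is
 * not set or no callchain buffer is available.
 */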
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
	struct perf_callchain_entry *entry;
	int rctx;

	entry = get_callchain_entry(&rctx);

	if (!entry)
		return NULL;

	entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
					 max_depth, 0);

	/* stack_trace_save_tsk() works on unsigned long array, while
	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
	 * necessary to fix this mismatch.
	 */
	if (__BITS_PER_LONG != 64) {
		unsigned long *from = (unsigned long *) entry->ip;
		u64 *to = entry->ip;
		int i;

		/* copy data from the end to avoid using extra buffer */
		for (i = entry->nr - 1; i >= 0; i--)
			to[i] = (u64)(from[i]);
	}

	put_callchain_entry(rctx);

	return entry;
#else /* CONFIG_STACKTRACE */
	return NULL;
#endif
}
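
/* Hash the trace, pick the bucket indexed by the hash and store the trace
 * (raw ips or build_id+offset records) there. Returns the bucket id used
 * as the map key, -EEXIST if a different trace already occupies the bucket
 * and BPF_F_REUSE_STACKID is not set, or -ENOMEM/-EFAULT on failure.
 */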
static long __bpf_get_stackid(struct bpf_map *map,
			      struct perf_callchain_entry *trace, u64 flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	u64 *ips;
	bool hash_matches;

	if (trace->nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr = trace->nr - skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip;

	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	u32 max_depth = map->value_size / stack_map_data_size(map);
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	max_depth += skip;
	if (max_depth > sysctl_perf_event_max_stack)
		max_depth = sysctl_perf_event_max_stack;

	trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
				   false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
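
/* Count the leading kernel addresses in a callchain; perf marks the start
 * of the user portion with the PERF_CONTEXT_USER sentinel.
 */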
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
	__u64 nr_kernel = 0;

	while (nr_kernel < trace->nr) {
		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
			break;
		nr_kernel++;
	}
	return nr_kernel;
}

BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_map *, map, u64, flags)
{
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	__u64 nr_kernel;
	int ret;

	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
		return bpf_get_stackid((unsigned long)(ctx->regs),
				       (unsigned long) map, flags, 0, 0);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	trace = ctx->data->callchain;
	if (unlikely(!trace))
		return -EFAULT;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		ret = __bpf_get_stackid(map, trace, flags);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			return -EFAULT;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		ret = __bpf_get_stackid(map, trace, flags);
	}
	return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
	.func		= bpf_get_stackid_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
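
/* Common worker for bpf_get_stack() and bpf_get_task_stack(): capture a
 * callchain (or use trace_in) and copy up to 'size' bytes of ips or
 * build_id+offset records into 'buf'. Returns the number of bytes copied,
 * or a negative error with 'buf' zeroed.
 */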
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
			    struct perf_callchain_entry *trace_in,
			    void *buf, u32 size, u64 flags)
{
	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	/* cannot get valid user stack for task without user_mode regs */
	if (task && user && !user_mode(regs))
		goto err_fault;

	num_elem = size / elem_size;
	max_depth = num_elem + skip;
	if (sysctl_perf_event_max_stack < max_depth)
		max_depth = sysctl_perf_event_max_stack;

	if (trace_in)
		trace = trace_in;
	else if (kernel && task)
		trace = get_callchain_entry_for_task(task, max_depth);
	else
		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
					   false, false);
	if (unlikely(!trace))
		goto err_fault;

	if (trace->nr < skip)
		goto err_fault;

	trace_nr = trace->nr - skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;

	ips = trace->ip + skip;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
	   u32, size, u64, flags)
{
	struct pt_regs *regs;
	long res = -EINVAL;

	if (!try_get_task_stack(task))
		return -EFAULT;

	regs = task_pt_regs(task);
	if (regs)
		res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
	put_task_stack(task);

	return res;
}

BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)

const struct bpf_func_proto bpf_get_task_stack_proto = {
	.func		= bpf_get_task_stack,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_get_task_stack_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	int err = -EINVAL;
	__u64 nr_kernel;

	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	err = -EFAULT;
	trace = ctx->data->callchain;
	if (unlikely(!trace))
		goto clear;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			goto clear;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
	}
	return err;

clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
	.func		= bpf_get_stack_pe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}
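
/* Called from syscall: report the id of the next populated bucket,
 * starting over from 0 when 'key' is NULL, out of range, or refers to an
 * empty bucket.
 */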
static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

static int stack_trace_map_btf_id;
const struct bpf_map_ops stack_trace_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_stack_map",
	.map_btf_id = &stack_trace_map_btf_id,
};
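
/* Early initcall: set up the per-cpu irq_work used by do_up_read() to
 * defer releasing the mmap lock from IRQ-disabled contexts.
 */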
static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);