/* ringbuf.c */

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/kmemleak.h>
#include <uapi/linux/btf.h>

#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)

/* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
#define RINGBUF_PGOFF \
        (offsetof(struct bpf_ringbuf, consumer_pos) >> PAGE_SHIFT)
/* consumer page and producer page */
#define RINGBUF_POS_PAGES 2

#define RINGBUF_MAX_RECORD_SZ (UINT_MAX/4)

/* Maximum size of ring buffer area is limited by 32-bit page offset within
 * record header, counted in pages. Reserve 8 bits for extensibility, and take
 * into account a few extra pages for consumer/producer pages and
 * non-mmap()'able parts. This gives a 64GB limit, which seems plenty for a
 * single ring buffer.
 */
#define RINGBUF_MAX_DATA_SZ \
        (((1ULL << 24) - RINGBUF_POS_PAGES - RINGBUF_PGOFF) * PAGE_SIZE)
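
/* Worked example of the limit above (illustration only, assuming 4KB pages):
 * 2^24 pages * 4KB = 64GB; subtracting RINGBUF_POS_PAGES and RINGBUF_PGOFF
 * only shaves off a few KB, so the effective data-area limit stays just
 * under 64GB.
 */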

struct bpf_ringbuf {
        wait_queue_head_t waitq;
        struct irq_work work;
        u64 mask;
        struct page **pages;
        int nr_pages;
        spinlock_t spinlock ____cacheline_aligned_in_smp;
        /* Consumer and producer counters are put into separate pages to allow
         * mapping consumer page as r/w, but restrict producer page to r/o.
         * This protects producer position from being modified by user-space
         * application and ruining in-kernel position tracking.
         */
        unsigned long consumer_pos __aligned(PAGE_SIZE);
        unsigned long producer_pos __aligned(PAGE_SIZE);
        char data[] __aligned(PAGE_SIZE);
};
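
/* Layout sketch: the vmap()'ed area starts with RINGBUF_PGOFF page(s) of
 * kernel-only metadata (waitq, irq_work, mask, spinlock, ...), followed by
 * one consumer_pos page, one producer_pos page, and then the data pages,
 * which are mapped twice back-to-back (see bpf_ringbuf_area_alloc() below).
 */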

struct bpf_ringbuf_map {
        struct bpf_map map;
        struct bpf_map_memory memory;
        struct bpf_ringbuf *rb;
};

/* 8-byte ring buffer record header structure */
struct bpf_ringbuf_hdr {
        u32 len;
        u32 pg_off;
};
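
/* Record layout sketch (bits per uapi/linux/bpf.h): each record in the data
 * area is this 8-byte header followed by the sample, rounded up to 8 bytes.
 * hdr->len holds the sample length, with BPF_RINGBUF_BUSY_BIT (bit 31) set
 * while the record is reserved but not yet committed and
 * BPF_RINGBUF_DISCARD_BIT (bit 30) set if the record was discarded;
 * hdr->pg_off is the page offset back to struct bpf_ringbuf.
 */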

static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
{
        const gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
                            __GFP_ZERO;
        int nr_meta_pages = RINGBUF_PGOFF + RINGBUF_POS_PAGES;
        int nr_data_pages = data_sz >> PAGE_SHIFT;
        int nr_pages = nr_meta_pages + nr_data_pages;
        struct page **pages, *page;
        struct bpf_ringbuf *rb;
        size_t array_size;
        int i;

        /* Each data page is mapped twice to allow "virtual"
         * continuous read of samples wrapping around the end of ring
         * buffer area:
         * ------------------------------------------------------
         * | meta pages |  real data pages  |  same data pages  |
         * ------------------------------------------------------
         * |            | 1 2 3 4 5 6 7 8 9 | 1 2 3 4 5 6 7 8 9 |
         * ------------------------------------------------------
         * |            | TA             DA | TA             DA |
         * ------------------------------------------------------
         *                               ^^^^^^^
         *                                  |
         * Here, no need to worry about special handling of wrapped-around
         * data due to double-mapped data pages. This works both in kernel and
         * when mmap()'ed in user-space, simplifying both kernel and
         * user-space implementations significantly.
         */
        array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);
        if (array_size > PAGE_SIZE)
                pages = vmalloc_node(array_size, numa_node);
        else
                pages = kmalloc_node(array_size, flags, numa_node);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++) {
                page = alloc_pages_node(numa_node, flags, 0);
                if (!page) {
                        nr_pages = i;
                        goto err_free_pages;
                }
                pages[i] = page;
                if (i >= nr_meta_pages)
                        pages[nr_data_pages + i] = page;
        }

        rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
                  VM_MAP | VM_USERMAP, PAGE_KERNEL);
        if (rb) {
                kmemleak_not_leak(pages);
                rb->pages = pages;
                rb->nr_pages = nr_pages;
                return rb;
        }

err_free_pages:
        for (i = 0; i < nr_pages; i++)
                __free_page(pages[i]);
        kvfree(pages);
        return NULL;
}
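
/* Example of what the double mapping buys (illustration, with made-up
 * numbers): in a 9-page data area, a 64-byte record that starts 16 bytes
 * before the end of the area can still be read with a single linear copy,
 * because the bytes that logically wrap to the start of the buffer are
 * visible right after the end via the second mapping of the same pages.
 */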

static void bpf_ringbuf_notify(struct irq_work *work)
{
        struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);

        wake_up_all(&rb->waitq);
}

static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
{
        struct bpf_ringbuf *rb;

        rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
        if (!rb)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&rb->spinlock);
        init_waitqueue_head(&rb->waitq);
        init_irq_work(&rb->work, bpf_ringbuf_notify);

        rb->mask = data_sz - 1;
        rb->consumer_pos = 0;
        rb->producer_pos = 0;

        return rb;
}

static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
        struct bpf_ringbuf_map *rb_map;
        u64 cost;
        int err;

        if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        if (attr->key_size || attr->value_size ||
            !is_power_of_2(attr->max_entries) ||
            !PAGE_ALIGNED(attr->max_entries))
                return ERR_PTR(-EINVAL);

#ifdef CONFIG_64BIT
        /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
        if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
                return ERR_PTR(-E2BIG);
#endif

        rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
        if (!rb_map)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&rb_map->map, attr);

        cost = sizeof(struct bpf_ringbuf_map) +
               sizeof(struct bpf_ringbuf) +
               attr->max_entries;
        err = bpf_map_charge_init(&rb_map->map.memory, cost);
        if (err)
                goto err_free_map;

        rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);
        if (IS_ERR(rb_map->rb)) {
                err = PTR_ERR(rb_map->rb);
                goto err_uncharge;
        }

        return &rb_map->map;

err_uncharge:
        bpf_map_charge_finish(&rb_map->map.memory);
err_free_map:
        kfree(rb_map);
        return ERR_PTR(err);
}

static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
{
        /* copy pages pointer and nr_pages to local variables, as we are going
         * to unmap rb itself with vunmap() below
         */
        struct page **pages = rb->pages;
        int i, nr_pages = rb->nr_pages;

        vunmap(rb);
        for (i = 0; i < nr_pages; i++)
                __free_page(pages[i]);
        kvfree(pages);
}

static void ringbuf_map_free(struct bpf_map *map)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        bpf_ringbuf_free(rb_map->rb);
        kfree(rb_map);
}

static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-ENOTSUPP);
}

static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
                                   u64 flags)
{
        return -ENOTSUPP;
}

static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
{
        return -ENOTSUPP;
}

static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
                                    void *next_key)
{
        return -ENOTSUPP;
}

static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);

        if (vma->vm_flags & VM_WRITE) {
                /* allow writable mapping for the consumer_pos only */
                if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EPERM;
        } else {
                vma->vm_flags &= ~VM_MAYWRITE;
        }
        /* remap_vmalloc_range() checks size and offset constraints */
        return remap_vmalloc_range(vma, rb_map->rb,
                                   vma->vm_pgoff + RINGBUF_PGOFF);
}
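
/* User-space mapping sketch (illustration; roughly what a ring buffer
 * consumer such as libbpf's does): the consumer_pos page is mapped
 * read-write at page offset 0, while the producer_pos page plus the
 * (double-mapped) data pages are mapped read-only starting at page offset 1:
 *
 *      cons = mmap(NULL, page_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                  map_fd, 0);
 *      prod = mmap(NULL, page_sz + 2 * data_sz, PROT_READ, MAP_SHARED,
 *                  map_fd, page_sz);
 */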

static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
{
        unsigned long cons_pos, prod_pos;

        cons_pos = smp_load_acquire(&rb->consumer_pos);
        prod_pos = smp_load_acquire(&rb->producer_pos);
        return prod_pos - cons_pos;
}

static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
                                 struct poll_table_struct *pts)
{
        struct bpf_ringbuf_map *rb_map;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        poll_wait(filp, &rb_map->rb->waitq, pts);

        if (ringbuf_avail_data_sz(rb_map->rb))
                return EPOLLIN | EPOLLRDNORM;
        return 0;
}

static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = ringbuf_map_alloc,
        .map_free = ringbuf_map_free,
        .map_mmap = ringbuf_map_mmap,
        .map_poll = ringbuf_map_poll,
        .map_lookup_elem = ringbuf_map_lookup_elem,
        .map_update_elem = ringbuf_map_update_elem,
        .map_delete_elem = ringbuf_map_delete_elem,
        .map_get_next_key = ringbuf_map_get_next_key,
        .map_btf_name = "bpf_ringbuf_map",
        .map_btf_id = &ringbuf_map_btf_id,
};

/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
 * calculate offset from record metadata to ring buffer in pages, rounded
 * down. This page offset is stored as part of record metadata and allows
 * restoring struct bpf_ringbuf * from a record pointer. This page offset is
 * stored at offset 4 of record metadata header.
 */
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
                                     struct bpf_ringbuf_hdr *hdr)
{
        return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
}

/* Given pointer to ring buffer record header, restore pointer to struct
 * bpf_ringbuf itself by using page offset stored at offset 4
 */
static struct bpf_ringbuf *
bpf_ringbuf_restore_from_rec(struct bpf_ringbuf_hdr *hdr)
{
        unsigned long addr = (unsigned long)(void *)hdr;
        unsigned long off = (unsigned long)hdr->pg_off << PAGE_SHIFT;

        return (void*)((addr & PAGE_MASK) - off);
}
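
/* Round-trip example (illustration, with made-up addresses and 4KB pages):
 * if rb is page-aligned at 0xffff888000000000 and a record header lands at
 * rb + 3*4096 + 24, then pg_off = 3. On restore, (addr & PAGE_MASK) yields
 * rb + 3*4096, and subtracting 3 << PAGE_SHIFT gets back to rb exactly,
 * because rb itself is always page-aligned (it comes from vmap()).
 */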

static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
{
        unsigned long cons_pos, prod_pos, new_prod_pos, flags;
        u32 len, pg_off;
        struct bpf_ringbuf_hdr *hdr;

        if (unlikely(size > RINGBUF_MAX_RECORD_SZ))
                return NULL;

        len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
        if (len > rb->mask + 1)
                return NULL;

        cons_pos = smp_load_acquire(&rb->consumer_pos);

        if (in_nmi()) {
                if (!spin_trylock_irqsave(&rb->spinlock, flags))
                        return NULL;
        } else {
                spin_lock_irqsave(&rb->spinlock, flags);
        }

        prod_pos = rb->producer_pos;
        new_prod_pos = prod_pos + len;

        /* check for out of ringbuf space by ensuring producer position
         * doesn't advance more than (ringbuf_size - 1) ahead
         */
        if (new_prod_pos - cons_pos > rb->mask) {
                spin_unlock_irqrestore(&rb->spinlock, flags);
                return NULL;
        }

        hdr = (void *)rb->data + (prod_pos & rb->mask);
        pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
        hdr->len = size | BPF_RINGBUF_BUSY_BIT;
        hdr->pg_off = pg_off;

        /* pairs with consumer's smp_load_acquire() */
        smp_store_release(&rb->producer_pos, new_prod_pos);

        spin_unlock_irqrestore(&rb->spinlock, flags);

        return (void *)hdr + BPF_RINGBUF_HDR_SZ;
}
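
/* Space-check example (illustration, made-up numbers): with a 4096-byte data
 * area (mask = 4095), cons_pos = 0 and prod_pos = 4000, reserving a sample
 * that rounds up to len = 104 gives new_prod_pos - cons_pos = 4104 > 4095,
 * so the reservation fails with NULL rather than overwriting unread data.
 */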

BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
        struct bpf_ringbuf_map *rb_map;

        if (unlikely(flags))
                return 0;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
}

const struct bpf_func_proto bpf_ringbuf_reserve_proto = {
        .func = bpf_ringbuf_reserve,
        .ret_type = RET_PTR_TO_ALLOC_MEM_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
        .arg3_type = ARG_ANYTHING,
};

static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard)
{
        unsigned long rec_pos, cons_pos;
        struct bpf_ringbuf_hdr *hdr;
        struct bpf_ringbuf *rb;
        u32 new_len;

        hdr = sample - BPF_RINGBUF_HDR_SZ;
        rb = bpf_ringbuf_restore_from_rec(hdr);
        new_len = hdr->len ^ BPF_RINGBUF_BUSY_BIT;
        if (discard)
                new_len |= BPF_RINGBUF_DISCARD_BIT;

        /* update record header with correct final size prefix */
        xchg(&hdr->len, new_len);

        /* if consumer caught up and is waiting for our record, notify about
         * new data availability
         */
        rec_pos = (void *)hdr - (void *)rb->data;
        cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;

        if (flags & BPF_RB_FORCE_WAKEUP)
                irq_work_queue(&rb->work);
        else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP))
                irq_work_queue(&rb->work);
}

BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags)
{
        bpf_ringbuf_commit(sample, flags, false /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_submit_proto = {
        .func = bpf_ringbuf_submit,
        .ret_type = RET_VOID,
        .arg1_type = ARG_PTR_TO_ALLOC_MEM,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags)
{
        bpf_ringbuf_commit(sample, flags, true /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_discard_proto = {
        .func = bpf_ringbuf_discard,
        .ret_type = RET_VOID,
        .arg1_type = ARG_PTR_TO_ALLOC_MEM,
        .arg2_type = ARG_ANYTHING,
};
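
/* Typical reserve/submit usage from a BPF program (sketch only; assumes a
 * BPF_MAP_TYPE_RINGBUF map named "rb" and a hypothetical struct event):
 *
 *      struct event *e;
 *
 *      e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
 *      if (!e)
 *              return 0;       // buffer full, sample dropped
 *      e->pid = bpf_get_current_pid_tgid() >> 32;
 *      if (filtered_out)       // hypothetical condition
 *              bpf_ringbuf_discard(e, 0);
 *      else
 *              bpf_ringbuf_submit(e, 0);
 */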

BPF_CALL_4(bpf_ringbuf_output, struct bpf_map *, map, void *, data, u64, size,
           u64, flags)
{
        struct bpf_ringbuf_map *rb_map;
        void *rec;

        if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP)))
                return -EINVAL;

        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        rec = __bpf_ringbuf_reserve(rb_map->rb, size);
        if (!rec)
                return -EAGAIN;

        memcpy(rec, data, size);
        bpf_ringbuf_commit(rec, flags, false /* discard */);
        return 0;
}

const struct bpf_func_proto bpf_ringbuf_output_proto = {
        .func = bpf_ringbuf_output,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MEM,
        .arg3_type = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags)
{
        struct bpf_ringbuf *rb;

        rb = container_of(map, struct bpf_ringbuf_map, map)->rb;

        switch (flags) {
        case BPF_RB_AVAIL_DATA:
                return ringbuf_avail_data_sz(rb);
        case BPF_RB_RING_SIZE:
                return rb->mask + 1;
        case BPF_RB_CONS_POS:
                return smp_load_acquire(&rb->consumer_pos);
        case BPF_RB_PROD_POS:
                return smp_load_acquire(&rb->producer_pos);
        default:
                return 0;
        }
}

const struct bpf_func_proto bpf_ringbuf_query_proto = {
        .func = bpf_ringbuf_query,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_ANYTHING,
};
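
/* Query usage sketch from a BPF program (illustration; "rb" is a
 * BPF_MAP_TYPE_RINGBUF map): bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA)
 * returns how many bytes are currently unconsumed, which can be used, e.g.,
 * to decide between BPF_RB_NO_WAKEUP and BPF_RB_FORCE_WAKEUP when submitting.
 */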