mmap.c

// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <string.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include "internal.h"

void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}
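
/*
 * The total mapping length is the data area (mask + 1 bytes, a power of two)
 * plus the leading perf_event_mmap_page control page; the data area starts
 * at map->base + page_size.
 */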

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, int cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd = fd;
	map->cpu = cpu;
	return 0;
}
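
/*
 * Example of mapping a single ring buffer with the functions above. This is
 * an illustrative sketch, not part of libperf: the choice of eight data
 * pages, the PROT flags and the perf_event_open() fd are assumptions made
 * for the example.
 *
 *	struct perf_mmap map;
 *	struct perf_mmap_param mp = {
 *		.prot = PROT_READ | PROT_WRITE,
 *		.mask = 8 * page_size - 1,	// 2^3 data pages, power of two
 *	};
 *
 *	perf_mmap__init(&map, NULL, false, NULL);
 *	if (perf_mmap__mmap(&map, &mp, fd, cpu))	// fd from perf_event_open()
 *		return -1;
 *	perf_mmap__get(&map);
 */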

void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}
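
/*
 * Mark the data up to map->prev as consumed: in non-overwrite mode this
 * publishes the new tail so the kernel may reuse that space; when only one
 * reference remains and the buffer is empty, that reference is dropped and
 * the buffer unmapped.
 */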

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}
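
/*
 * Walk a full backward (overwrite) ring buffer starting at 'start' to find
 * the end of the readable region: stop either after one full pass over the
 * buffer or at the first zero-sized header.
 */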

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);

	pheader = (struct perf_event_header *)(buf + (*start & mask));

	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}
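
/*
 * Typical handling of the perf_mmap__read_init() return values in a caller
 * (illustrative sketch only; how the caller reacts is an assumption):
 *
 *	switch (perf_mmap__read_init(map)) {
 *	case 0:
 *		break;		// data available between map->start and map->end
 *	case -EAGAIN:
 *		return 0;	// less than map->flush bytes pending, try again later
 *	case -ENOENT:
 *		return 0;	// ring buffer was already unmapped
 *	default:
 *		return -1;	// -EINVAL from the overwrite path
 *	}
 */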

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set the tail to map->prev.
 * map->prev needs to be corrected to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}

/* When check_messup is true, 'end' must point to a good entry. */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			/* Copy the wrapped event piecewise into event_copy. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
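
/*
 * Putting the pieces together: a minimal consumer loop built on the API
 * above. This is an illustrative sketch, not part of libperf; process_event()
 * and the way 'map' was obtained are assumptions.
 *
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return 0;			// nothing to read yet, or unmapped
 *
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		process_event(event);		// hypothetical handler
 *		perf_mmap__consume(map);	// release the space just read
 *	}
 *
 *	perf_mmap__read_done(map);		// mandatory for overwrite mode
 */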