ordered-events.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
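
/*
 * Insert a new event into the time-ordered 'events' list. The walk
 * starts from the most recently queued event (oe->last) rather than
 * from the list head, on the assumption that consecutive events
 * usually carry nearby timestamps, keeping the search short.
 */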
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}
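
/*
 * When 'copy_on_queue' is set, queued events are duplicated with
 * memdup() so they stay valid after the caller's buffer is reused;
 * 'cur_alloc_size' charges the copies against 'max_alloc_size'.
 * Without it, the caller's event pointer is queued as-is.
 */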
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (oe->copy_on_queue)
                __free_dup_event(oe, event);
}
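
/*
 * Ordered events are allocated in 64K chunks; MAX_SAMPLE_BUFFER is the
 * number of 'struct ordered_event' entries that fit into one chunk.
 */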
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))

static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;
        size_t size;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        /*
         * We maintain the following scheme of buffers for ordered
         * event allocation:
         *
         *   to_free list -> buffer1 (64K)
         *                   buffer2 (64K)
         *                   ...
         *
         * Each buffer keeps an array of ordered events objects:
         *    buffer -> event[0]
         *              event[1]
         *              ...
         *
         * Each allocated ordered event is linked to one of
         * following lists:
         *   - time ordered list 'events'
         *   - list of currently removed events 'cache'
         *
         * Allocation of the ordered event uses the following order
         * to get the memory:
         *   - use recently removed object from 'cache' list
         *   - use available object in current allocation buffer
         *   - allocate new buffer if the current buffer is full
         *
         * Removal of ordered event object moves it from events to
         * the cache list.
         */
        size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del_init(&new->list);
        } else if (oe->buffer) {
                new = &oe->buffer->event[oe->buffer_idx];
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                oe->buffer_idx = 1;
                new = &oe->buffer->event[0];
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                /* Drop the duplicated event that no buffer slot could take. */
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                          union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}
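
/*
 * Queue one event. Timestamps of 0 and ~0ULL are rejected with -ETIME.
 * Events older than the last flush are still queued, but counted in
 * 'nr_unordered_events'. If allocation fails, half of the queue is
 * flushed to reclaim memory before a single retry.
 */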
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          u64 timestamp, u64 file_offset)
{
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp, "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}
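
/*
 * Deliver every queued event with a timestamp up to 'next_flush' to the
 * 'deliver' callback, in time order, then fix up 'oe->last' so that
 * subsequent queue_event() walks start from a node that is still on the
 * list. Stops early if the session is done or delivery fails.
 */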
static int do_flush(struct ordered_events *oe, bool show_progress)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;
                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}
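
/*
 * Compute the flush limit for the requested mode, then run do_flush():
 *   FINAL/TOP - flush everything (FINAL also shows a progress bar),
 *   HALF      - flush up to the midpoint between the first and last
 *               queued timestamps,
 *   TIME      - flush up to the given timestamp,
 *   ROUND     - reuse the previous limit, then advance it to
 *               'max_timestamp' on success.
 */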
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
                                   u64 timestamp)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
                "TOP ",
                "TIME ",
        };
        int err;
        bool show_progress = false;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                show_progress = true;
                __fallthrough;
        case OE_FLUSH__TOP:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__TIME:
                oe->next_flush = timestamp;
                show_progress = false;
                break;

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = do_flush(oe, show_progress);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
        return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

u64 ordered_events__first_time(struct ordered_events *oe)
{
        struct ordered_event *event;

        if (list_empty(&oe->events))
                return 0;

        event = list_first_entry(&oe->events, struct ordered_event, list);
        return event->timestamp;
}

void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
                          void *data)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver = deliver;
        oe->data = data;
}
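
/*
 * Typical lifecycle (illustrative sketch only; 'deliver_event' is a
 * hypothetical callback, not part of this file):
 *
 *      static int deliver_event(struct ordered_events *oe,
 *                               struct ordered_event *event)
 *      {
 *              ... process event->event, return 0 on success ...
 *      }
 *
 *      struct ordered_events oe;
 *
 *      ordered_events__init(&oe, deliver_event, NULL);
 *      while (reading events)
 *              ordered_events__queue(&oe, event, timestamp, file_offset);
 *      ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *      ordered_events__free(&oe);
 */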

static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
                            unsigned int max, struct ordered_events *oe)
{
        if (oe->copy_on_queue) {
                unsigned int i;

                for (i = 0; i < max; i++)
                        __free_dup_event(oe, buffer->event[i].event);
        }

        free(buffer);
}

void ordered_events__free(struct ordered_events *oe)
{
        struct ordered_events_buffer *buffer, *tmp;

        if (list_empty(&oe->to_free))
                return;

        /*
         * Current buffer might not have all the events allocated
         * yet, we need to free only allocated ones ...
         */
        if (oe->buffer) {
                list_del_init(&oe->buffer->list);
                ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
        }

        /* ... and continue with the rest */
        list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
                list_del_init(&buffer->list);
                ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
        }
}
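
/*
 * Reset the queue to its freshly initialized state, keeping the
 * 'deliver' callback and 'data' pointer across the reset.
 */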
void ordered_events__reinit(struct ordered_events *oe)
{
        /* Save the callback and data pointer: the memset below zeroes them. */
        ordered_events__deliver_t old_deliver = oe->deliver;
        void *old_data = oe->data;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver, old_data);
}