cs-etm.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"

#define MAX_TIMESTAMP (~0ULL)

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

struct cs_etm_traceid_queue {
	u8 trace_chan_id;
	pid_t pid, tid;
	u64 period_instructions;
	size_t last_branch_pos;
	union perf_event *event_buf;
	struct thread *thread;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
	struct cs_etm_packet_queue packet_queue;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	unsigned int queue_nr;
	u8 pending_timestamp;
	u64 offset;
	const unsigned char *buf;
	size_t buf_len, buf_used;
	/* Conversion between traceID and index in traceid_queues array */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);

/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with. One option is to modify the auxtrace_heap_XYZ() API, or we
 * can simply encode the etm queue number as the upper 16 bits and the
 * channel as the lower 16 bits.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
	(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)

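/*
 * For example, queue_nr 2 and trace_chan_id 0x10 pack into cs_queue_nr
 * 0x00020010; TO_QUEUE_NR() and TO_TRACE_CHAN_ID() then recover 2 and
 * 0x10 respectively.
 */
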
static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
	etmidr &= ETMIDR_PTM_VERSION;

	if (etmidr == ETMIDR_PTM_VERSION)
		return CS_ETM_PROTO_PTM;

	return CS_ETM_PROTO_ETMV3;
}

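/*
 * Look up the metadata for @trace_chan_id in the global traceid_list and
 * return its magic number, which identifies the trace protocol (ETMv3/PTM
 * vs. ETMv4) the channel was recorded with.
 */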
static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*magic = metadata[CS_ETM_MAGIC];
	return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];
	return 0;
}

void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
	/*
	 * When a timestamp packet is encountered the backend code
	 * is stopped so that the front end has time to process packets
	 * that were accumulated in the traceID queue. Since there can
	 * be more than one channel per cs_etm_queue, we need to specify
	 * what traceID queue needs servicing.
	 */
	etmq->pending_timestamp = trace_chan_id;
}

static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
				      u8 *trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	if (!etmq->pending_timestamp)
		return 0;

	if (trace_chan_id)
		*trace_chan_id = etmq->pending_timestamp;

	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
						     etmq->pending_timestamp);
	if (!packet_queue)
		return 0;

	/* Acknowledge pending status */
	etmq->pending_timestamp = 0;

	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
	return packet_queue->timestamp;
}

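/*
 * Reset a packet queue to an empty state: rewind the head/tail indexes
 * and mark every slot in the buffer as invalid.
 */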
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
	int i;

	queue->head = 0;
	queue->tail = 0;
	queue->packet_count = 0;
	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].instr_count = 0;
		queue->packet_buffer[i].last_instr_taken_branch = false;
		queue->packet_buffer[i].last_instr_size = 0;
		queue->packet_buffer[i].last_instr_type = 0;
		queue->packet_buffer[i].last_instr_subtype = 0;
		queue->packet_buffer[i].last_instr_cond = 0;
		queue->packet_buffer[i].flags = 0;
		queue->packet_buffer[i].exception_number = UINT32_MAX;
		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
		queue->packet_buffer[i].cpu = INT_MIN;
	}
}

static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];
		cs_etm__clear_packet_queue(&tidq->packet_queue);
	}
}

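/*
 * Set up a newly allocated traceid_queue: inherit the tid from the
 * auxtrace queue, then allocate the current/previous packet pair, the
 * optional last-branch buffers and the scratch event buffer used when
 * synthesizing samples.
 */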
static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u8 trace_chan_id)
{
	int rc = -ENOMEM;
	struct auxtrace_queue *queue;
	struct cs_etm_auxtrace *etm = etmq->etm;

	cs_etm__clear_packet_queue(&tidq->packet_queue);

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
	tidq->tid = queue->tid;
	tidq->pid = -1;
	tidq->trace_chan_id = trace_chan_id;

	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->packet)
		goto out;

	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->prev_packet)
		goto out_free;

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		tidq->last_branch = zalloc(sz);
		if (!tidq->last_branch)
			goto out_free;
		tidq->last_branch_rb = zalloc(sz);
		if (!tidq->last_branch_rb)
			goto out_free;
	}

	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!tidq->event_buf)
		goto out_free;

	return 0;

out_free:
	zfree(&tidq->last_branch_rb);
	zfree(&tidq->last_branch);
	zfree(&tidq->prev_packet);
	zfree(&tidq->packet);
out:
	return rc;
}

static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	int idx;
	struct int_node *inode;
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue *tidq, **traceid_queues;
	struct cs_etm_auxtrace *etm = etmq->etm;

	if (etm->timeless_decoding)
		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;

	traceid_queues_list = etmq->traceid_queues_list;

	/*
	 * Check if the traceid_queue exists for this traceID by looking
	 * in the queue list.
	 */
	inode = intlist__find(traceid_queues_list, trace_chan_id);
	if (inode) {
		idx = (int)(intptr_t)inode->priv;
		return etmq->traceid_queues[idx];
	}

	/* We couldn't find a traceid_queue for this traceID, allocate one */
	tidq = malloc(sizeof(*tidq));
	if (!tidq)
		return NULL;

	memset(tidq, 0, sizeof(*tidq));

	/* Get a valid index for the new traceid_queue */
	idx = intlist__nr_entries(traceid_queues_list);
	/* Memory for the inode is free'ed in cs_etm__free_traceid_queues() */
	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
	if (!inode)
		goto out_free;

	/* Associate this traceID with this index */
	inode->priv = (void *)(intptr_t)idx;

	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
		goto out_free;

	/* Grow the traceid_queues array by one unit */
	traceid_queues = etmq->traceid_queues;
	traceid_queues = reallocarray(traceid_queues,
				      idx + 1,
				      sizeof(*traceid_queues));

	/*
	 * On failure reallocarray() returns NULL and the original block of
	 * memory is left untouched.
	 */
	if (!traceid_queues)
		goto out_free;

	traceid_queues[idx] = tidq;
	etmq->traceid_queues = traceid_queues;

	return etmq->traceid_queues[idx];

out_free:
	/*
	 * Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
	 */
	intlist__remove(traceid_queues_list, inode);
	free(tidq);

	return NULL;
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (tidq)
		return &tidq->packet_queue;

	return NULL;
}

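/*
 * Swap the current and previous packet pointers so that PACKET becomes
 * PREV_PACKET for the next round of decoding. Only needed when samples
 * are being generated, since only then is the previous packet consulted.
 */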
static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
				struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *tmp;

	if (etm->sample_branches || etm->synth_opts.last_branch ||
	    etm->sample_instructions) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = tidq->packet;
		tidq->packet = tidq->prev_packet;
		tidq->prev_packet = tmp;
	}
}

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx,
					  u32 etmidr)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

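/*
 * Fill one set of decoder trace parameters per CPU from the recorded
 * metadata, dispatching on the per-CPU magic number to pick the ETMv3/PTM
 * or ETMv4 register layout.
 */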
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
				     struct cs_etm_auxtrace *etm)
{
	int i;
	u32 etmidr;
	u64 architecture;

	for (i = 0; i < etm->num_cpu; i++) {
		architecture = etm->metadata[i][CS_ETM_MAGIC];

		switch (architecture) {
		case __perf_cs_etmv3_magic:
			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
			break;
		case __perf_cs_etmv4_magic:
			cs_etm__set_trace_param_etmv4(t_params, etm, i);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_queue *etmq,
				       enum cs_etm_decoder_operation mode)
{
	int ret = -EINVAL;

	if (!(mode < CS_ETM_OPERATION_MAX))
		goto out;

	d_params->packet_printer = cs_etm__packet_dump;
	d_params->operation = mode;
	d_params->data = etmq;
	d_params->formatted = true;
	d_params->fsyncs = false;
	d_params->hsyncs = false;
	d_params->frame_aligned = true;

	ret = 0;
out:
	return ret;
}

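/*
 * Print the raw CoreSight trace packets contained in an auxtrace buffer.
 * A throw-away decoder is created in CS_ETM_OPERATION_PRINT mode so that
 * each packet is routed to cs_etm__packet_dump() instead of being decoded
 * into samples.
 */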
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		return;

	if (cs_etm__init_trace_params(t_params, etm))
		goto out_free;

	/* Set decoder parameters to simply print the trace packets */
	if (cs_etm__init_decoder_params(&d_params, NULL,
					CS_ETM_OPERATION_PRINT))
		goto out_free;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	if (!decoder)
		goto out_free;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);

out_free:
	zfree(&t_params);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	if (etm->timeless_decoding)
		return cs_etm__process_timeless_queues(etm, -1);

	return cs_etm__process_queues(etm);
}

static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	uintptr_t priv;
	struct int_node *inode, *tmp;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
		priv = (uintptr_t)inode->priv;
		idx = priv;

		/* Free this traceid_queue from the array */
		tidq = etmq->traceid_queues[idx];
		thread__zput(tidq->thread);
		zfree(&tidq->event_buf);
		zfree(&tidq->last_branch);
		zfree(&tidq->last_branch_rb);
		zfree(&tidq->prev_packet);
		zfree(&tidq->packet);
		zfree(&tidq);

		/*
		 * Function intlist__remove() removes the inode from the list
		 * and deletes the memory associated with it.
		 */
		intlist__remove(traceid_queues_list, inode);
	}

	/* Then the RB tree itself */
	intlist__delete(traceid_queues_list);
	etmq->traceid_queues_list = NULL;

	/* Finally free the traceid_queues array */
	zfree(&etmq->traceid_queues);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	cs_etm_decoder__free(etmq->decoder);
	cs_etm__free_traceid_queues(etmq);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes for the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
{
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	return evsel->core.attr.type == aux->pmu_type;
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= etmq->etm->kernel_start) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
			      u64 address, size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;
	struct cs_etm_traceid_queue *tidq;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);
	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return 0;

	thread = tidq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}

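/*
 * Allocate a cs_etm_queue and create the full-fidelity decoder that goes
 * with it. cs_etm__mem_access() is registered over the whole address
 * space so the decoder library can read instruction memory on demand.
 */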
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params = NULL;
	struct cs_etm_queue *etmq;

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->traceid_queues_list = intlist__new(NULL);
	if (!etmq->traceid_queues_list)
		goto out_free;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	if (cs_etm__init_trace_params(t_params, etm))
		goto out_free;

	/* Set decoder parameters to decode trace packets */
	if (cs_etm__init_decoder_params(&d_params, etmq,
					CS_ETM_OPERATION_DECODE))
		goto out_free;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	zfree(&t_params);
	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	intlist__delete(etmq->traceid_queues_list);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	int ret = 0;
	unsigned int cs_queue_nr;
	u8 trace_chan_id;
	u64 timestamp;
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		goto out;

	etmq = cs_etm__alloc_queue(etm);

	if (!etmq) {
		ret = -ENOMEM;
		goto out;
	}

	queue->priv = etmq;
	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->offset = 0;

	if (etm->timeless_decoding)
		goto out;

	/*
	 * We are under a CPU-wide trace scenario. As such we need to know
	 * when the code that generated the traces started to execute so that
	 * it can be correlated with execution on other CPUs. So we get a
	 * handle on the beginning of traces and decode until we find a
	 * timestamp. The timestamp is then added to the auxtrace min heap
	 * in order to know what nibble (of all the etmqs) to decode first.
	 */
	while (1) {
		/*
		 * Fetch an aux_buffer from this etmq. Bail if no more
		 * blocks or an error has been encountered.
		 */
		ret = cs_etm__get_data_block(etmq);
		if (ret <= 0)
			goto out;

		/*
		 * Run decoder on the trace block. The decoder will stop when
		 * encountering a timestamp, a full packet queue or the end of
		 * trace for that block.
		 */
		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		/*
		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
		 * the timestamp calculation for us.
		 */
		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		/* We found a timestamp, no need to continue. */
		if (timestamp)
			break;

		/*
		 * We didn't find a timestamp so empty all the traceid packet
		 * queues before looking for another timestamp packet, either
		 * in the current data block or a new one. Packets that were
		 * just decoded are useless since no timestamp has been
		 * associated with them. As such simply discard them.
		 */
		cs_etm__clear_all_packet_queues(etmq);
	}

	/*
	 * We have a timestamp. Add it to the min heap to reflect when
	 * instructions conveyed by the range packets of this traceID queue
	 * started to execute. Once the same has been done for all the traceID
	 * queues of each etmq, rendering and decoding can start in
	 * chronological order.
	 *
	 * Note that packets decoded above are still in the traceID's packet
	 * queue and will be processed in cs_etm__process_queues().
	 */
	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
out:
	return ret;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}

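/*
 * Linearize the last-branch ring buffer into the flat branch_stack that
 * gets attached to synthesized samples, copying the newest entries first.
 */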
static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
				 struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs_src = tidq->last_branch_rb;
	struct branch_stack *bs_dst = tidq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps. First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[tidq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over. The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * tidq->last_branch_pos);
	}
}

static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
	tidq->last_branch_pos = 0;
	tidq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u8 trace_chan_id, u64 addr)
{
	u8 instrBytes[2];

	cs_etm__mem_access(etmq, trace_chan_id, addr,
			   ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 instruction size is indicated by bits[15:11] of the first
	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
	 * denote a 32-bit instruction.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
				     u64 trace_chan_id,
				     const struct cs_etm_packet *packet,
				     u64 offset)
{
	if (packet->isa == CS_ETM_ISA_T32) {
		u64 addr = packet->start_addr;

		while (offset) {
			addr += cs_etm__t32_instr_size(etmq,
						       trace_chan_id, addr);
			offset--;
		}
		return addr;
	}

	/* Assume a 4 byte instruction size (A32/A64) */
	return packet->start_addr + offset * 4;
}

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
					  struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs = tidq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down. After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!tidq->last_branch_pos)
		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	tidq->last_branch_pos -= 1;

	be = &bs->entries[tidq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
	be->to = cs_etm__first_executed_instr(tidq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

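/*
 * Advance to the next auxtrace buffer in this queue, mapping its data if
 * needed and dropping the previous buffer. Returns the new buffer length,
 * 0 when the queue is exhausted, or a negative error code.
 */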
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct cs_etm_traceid_queue *tidq)
{
	if ((!tidq->thread) && (tidq->tid != -1))
		tidq->thread = machine__find_thread(etm->machine, -1,
						    tidq->tid);

	if (tidq->thread)
		tidq->pid = tidq->thread->pid_;
}

int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
			 pid_t tid, u8 trace_chan_id)
{
	int cpu, err = -EINVAL;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return err;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return err;

	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
	if (err)
		return err;

	tidq->tid = tid;
	thread__zput(tidq->thread);

	cs_etm__set_pid_tid_cpu(etm, tidq);
	return 0;
}

bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
	return !!etmq->etm->timeless_decoding;
}

static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
			      u64 trace_chan_id,
			      const struct cs_etm_packet *packet,
			      struct perf_sample *sample)
{
	/*
	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
	 * packet, so directly bail out with 'insn_len' = 0.
	 */
	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
		sample->insn_len = 0;
		return;
	}

	/*
	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
	 * cs_etm__t32_instr_size().
	 */
	if (packet->isa == CS_ETM_ISA_T32)
		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
							  sample->ip);
	/* Otherwise, A64 and A32 instruction size are always 32-bit. */
	else
		sample->insn_len = 4;

	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
			   sample->insn_len, (void *)sample->insn);
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch. Generate sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
				       struct cs_etm_traceid_queue *tidq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = tidq->event_buf;
	struct dummy_branch_stack {
		u64 nr;
		u64 hw_idx;
		struct branch_entry entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(tidq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = ip;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.addr = cs_etm__first_executed_instr(tidq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
			  &sample);

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver branch event, error %d\n",
			ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

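/*
 * Create the synthetic 'branches' and 'instructions' events that decoded
 * samples will be attributed to, deriving their attributes from the evsel
 * that carried the CoreSight AUX data. The new event IDs are a fixed
 * offset from the original evsel's ID.
 */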
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}

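/*
 * Process the packet at the head of a traceid queue: update the last
 * branch ring buffer, emit periodic instruction samples and, when a
 * branch was taken or the trace was discontinued, a branch sample.
 */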
static int cs_etm__sample(struct cs_etm_queue *etmq,
			  struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	int ret;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 instrs_prev;

	/* Get instructions remainder from previous packet */
	instrs_prev = tidq->period_instructions;

	tidq->period_instructions += tidq->packet->instr_count;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
	    tidq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq, tidq);

	if (etm->sample_instructions &&
	    tidq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/*
		 * Below diagram demonstrates the instruction samples
		 * generation flows:
		 *
		 *    Instrs     Instrs       Instrs       Instrs
		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
		 *    |            |            |            |
		 *    V            V            V            V
		 *   --------------------------------------------------
		 *            ^                                  ^
		 *            |                                  |
		 *         Period                             Period
		 *    instructions(Pi)                   instructions(Pi')
		 *
		 *            |                                  |
		 *            \---------------- -----------------/
		 *                             V
		 *                 tidq->packet->instr_count
		 *
		 * Instrs Sample(n...) are the synthesised samples occurring
		 * every etm->instructions_sample_period instructions - as
		 * defined on the perf command line. Sample(n) is the last
		 * sample before the current etm packet; samples n+1 to n+3
		 * are generated from the current etm packet.
		 *
		 * tidq->packet->instr_count represents the number of
		 * instructions in the current etm packet.
		 *
		 * Period instructions (Pi) contains the number of
		 * instructions executed after the sample point(n) from the
		 * previous etm packet. This will always be less than
		 * etm->instructions_sample_period.
		 *
		 * When new samples are generated, sample(n+1) combines two
		 * groups of instructions - the tail of the old packet and
		 * the head of the incoming one; sample(n+2) and sample(n+3)
		 * each consume a full sample period of instructions. After
		 * sample(n+3), the remaining instructions carry over via
		 * tidq->period_instructions for the next round of
		 * calculation.
		 */

		/*
		 * Get the initial offset into the current packet instructions;
		 * entry conditions ensure that instrs_prev is less than
		 * etm->instructions_sample_period.
		 */
		u64 offset = etm->instructions_sample_period - instrs_prev;
		u64 addr;

		/* Prepare last branches for instruction sample */
		if (etm->synth_opts.last_branch)
			cs_etm__copy_last_branch_rb(etmq, tidq);

		while (tidq->period_instructions >=
				etm->instructions_sample_period) {
			/*
			 * Calculate the address of the sampled instruction (-1
			 * as sample is reported as though instruction has just
			 * been executed, but PC has not advanced to next
			 * instruction)
			 */
			addr = cs_etm__instr_addr(etmq, trace_chan_id,
						  tidq->packet, offset - 1);
			ret = cs_etm__synth_instruction_sample(
				etmq, tidq, addr,
				etm->instructions_sample_period);
			if (ret)
				return ret;

			offset += etm->instructions_sample_period;
			tidq->period_instructions -=
				etm->instructions_sample_period;
		}
	}

	if (etm->sample_branches) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
		    tidq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq, tidq);
			if (ret)
				return ret;
		}
	}

	cs_etm__packet_swap(etm, tidq);

	return 0;
}

static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
{
	/*
	 * When an exception packet is inserted, whether or not the last
	 * instruction in the previous range packet was a taken branch, we
	 * need to force 'prev_packet->last_instr_taken_branch' to true.
	 * This ensures a branch sample is generated for the instruction
	 * range before the exception traps to the kernel or before the
	 * exception returns.
	 *
	 * The exception packet includes dummy address values, so don't
	 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET usable
	 * for generating instruction and branch samples.
	 */
	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
		tidq->prev_packet->last_instr_taken_branch = true;

	return 0;
}

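/*
 * Drain whatever the previous packet holds: emit the pending last-branch
 * instruction sample and/or branch sample for it, then swap packets and
 * reset the last-branch ring buffer.
 */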
static int cs_etm__flush(struct cs_etm_queue *etmq,
			 struct cs_etm_traceid_queue *tidq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;

	/* Handle start tracing packet */
	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;
	}

	if (etm->sample_branches &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq, tidq);
		if (err)
			return err;
	}

swap_packet:
	cs_etm__packet_swap(etm, tidq);

	/* Reset last branches after flushing the trace */
	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(tidq);

	return err;
}

static int cs_etm__end_block(struct cs_etm_queue *etmq,
			     struct cs_etm_traceid_queue *tidq)
{
	int err;

	/*
	 * No new packet is coming and 'etmq->packet' still contains the stale
	 * packet that was set during the previous packet swap, so skip
	 * generating a branch sample to avoid using a stale packet.
	 *
	 * For this case, only flush the branch stack and generate a last
	 * branch event for the branches left in the circular buffer at the
	 * end of the trace.
	 */
	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;
	}

	return 0;
}
/*
 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
 * if need be.
 * Returns:	< 0	if error
 *		= 0	if no more auxtrace_buffer to read
 *		> 0	if the current buffer isn't empty yet
 */
static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
{
	int ret;

	if (!etmq->buf_len) {
		ret = cs_etm__get_trace(etmq);
		if (ret <= 0)
			return ret;
		/*
		 * We cannot assume consecutive blocks in the data file
		 * are contiguous, reset the decoder to force re-sync.
		 */
		ret = cs_etm_decoder__reset(etmq->decoder);
		if (ret)
			return ret;
	}

	return etmq->buf_len;
}
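/*
 * A minimal sketch of the intended calling pattern (mirroring
 * cs_etm__run_decoder() later in this file), shown for illustration only:
 *
 *	while ((ret = cs_etm__get_data_block(etmq)) > 0) {
 *		do {
 *			ret = cs_etm__decode_data_block(etmq);
 *			if (ret)
 *				break;
 *		} while (etmq->buf_len);
 *	}
 */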
static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
				 struct cs_etm_packet *packet,
				 u64 end_addr)
{
	/* Initialise to keep compiler happy */
	u16 instr16 = 0;
	u32 instr32 = 0;
	u64 addr;

	switch (packet->isa) {
	case CS_ETM_ISA_T32:
		/*
		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
		 *
		 *  b'15                b'8
		 * +-----------------+--------+
		 * | 1 1 0 1 1 1 1 1 |  imm8  |
		 * +-----------------+--------+
		 *
		 * According to the specification, SVC is only defined as a
		 * 16-bit T32 instruction and has no 32-bit encoding; so only
		 * read 2 bytes as the instruction size for T32 below.
		 */
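		/*
		 * For example, 0xDF05 decodes as SVC #5; the mask below
		 * keeps only the top byte (0xDF) and ignores imm8.
		 */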
		addr = end_addr - 2;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr16), (u8 *)&instr16);
		if ((instr16 & 0xFF00) == 0xDF00)
			return true;

		break;
	case CS_ETM_ISA_A32:
		/*
		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
		 *
		 *  b'31 b'28      b'27 b'24
		 * +---------+---------+-------------------------+
		 * |  !1111  | 1 1 1 1 |          imm24          |
		 * +---------+---------+-------------------------+
		 */
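		/*
		 * For example, 0xEF000000 is SVC #0 with the AL condition:
		 * bits [27:24] must be 0b1111 while the condition field in
		 * bits [31:28] must not be 0b1111.
		 */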
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0x0F000000) == 0x0F000000 &&
		    (instr32 & 0xF0000000) != 0xF0000000)
			return true;

		break;
	case CS_ETM_ISA_A64:
		/*
		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
		 *
		 *  b'31               b'21           b'4     b'0
		 * +-----------------------+---------+-----------+
		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
		 * +-----------------------+---------+-----------+
		 */
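		/*
		 * For example, SVC #0 encodes as 0xD4000001; the mask below
		 * checks only the fixed bits and ignores imm16.
		 */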
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0xFFE0001F) == 0xd4000001)
			return true;

		break;
	case CS_ETM_ISA_UNKNOWN:
	default:
		break;
	}

	return false;
}
static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
			       struct cs_etm_traceid_queue *tidq, u64 magic)
{
	u8 trace_chan_id = tidq->trace_chan_id;
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SVC)
			return true;

	/*
	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
	 * HVC cases; need to check if it's an SVC instruction based on
	 * the packet address.
	 */
	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
					 prev_packet->end_addr))
			return true;
	}

	return false;
}
static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
				       u64 magic)
{
	struct cs_etm_packet *packet = tidq->packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
		    packet->exception_number == CS_ETMV3_EXC_FIQ)
			return true;

	if (magic == __perf_cs_etmv4_magic)
		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
		    packet->exception_number == CS_ETMV4_EXC_FIQ)
			return true;

	return false;
}
static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u64 magic)
{
	u8 trace_chan_id = tidq->trace_chan_id;
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
		    packet->exception_number == CS_ETMV3_EXC_HYP ||
		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
			return true;

	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
			return true;

		/*
		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
		 * (i.e. SMC and HVC) are treated as sync exceptions.
		 */
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
					  prev_packet->end_addr))
			return true;

		/*
		 * ETMv4 has 5 bits for the exception number; numbers in
		 * the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END] are
		 * implementation defined exceptions.
		 *
		 * For this case, simply treat them as sync exceptions.
		 */
		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
		    packet->exception_number <= CS_ETMV4_EXC_END)
			return true;
	}

	return false;
}
static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
				    struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 magic;
	int ret;

	switch (packet->sample_type) {
	case CS_ETM_RANGE:
		/*
		 * An immediate branch instruction with neither link nor
		 * return flag is a normal branch instruction within
		 * the function.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
			packet->flags = PERF_IP_FLAG_BRANCH;

			if (packet->last_instr_cond)
				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
		}

		/*
		 * An immediate branch instruction with link (e.g. BL) is a
		 * branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * An indirect branch instruction with link (e.g. BLR) is a
		 * branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * An indirect branch instruction with subtype
		 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint for a
		 * function return on A32/T32.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * An indirect branch instruction without link (e.g. BR) is
		 * usually used for a function return, especially for
		 * functions within a dynamically linked library.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/* Return instruction for function return. */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * The decoder might insert a discontinuity in the middle of
		 * instruction packets; fix up prev_packet with the flag
		 * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting the trace.
		 */
		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_BEGIN;

		/*
		 * If the previous packet is an exception return packet
		 * and the return address immediately follows an SVC
		 * instruction, calibrate the previous packet's sample flags
		 * to PERF_IP_FLAG_SYSCALLRET.
		 */
		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_RETURN |
					   PERF_IP_FLAG_INTERRUPT) &&
		    cs_etm__is_svc_instr(etmq, trace_chan_id,
					 packet, packet->start_addr))
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_SYSCALLRET;
		break;
	case CS_ETM_DISCONTINUITY:
		/*
		 * The trace is discontinuous; if the previous packet is an
		 * instruction packet, set the flag PERF_IP_FLAG_TRACE_END
		 * for the previous packet.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_END;
		break;
	case CS_ETM_EXCEPTION:
		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
		if (ret)
			return ret;

		/* The exception is for a system call. */
		if (cs_etm__is_syscall(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_SYSCALLRET;
		/*
		 * The exceptions are triggered by external signals from the
		 * bus, interrupt controller, debug module, PE reset or halt.
		 */
		else if (cs_etm__is_async_exception(tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_INTERRUPT;
		/*
		 * Otherwise, the exception is caused by a trap, an
		 * instruction or data fault, or an alignment error.
		 */
		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_INTERRUPT;

		/*
		 * An exception packet is not used standalone for generating
		 * samples; it is affiliated with the previous instruction
		 * range packet. So when an exception packet is inserted, set
		 * the previous range packet's flags to tell perf that it is
		 * an exception taken branch.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = packet->flags;
		break;
	case CS_ETM_EXCEPTION_RET:
		/*
		 * An exception return packet is not used standalone for
		 * generating samples; it is affiliated with the previous
		 * instruction range packet. So when an exception return
		 * packet is inserted, set the previous range packet's flags
		 * to tell perf that it is an exception return branch.
		 *
		 * The exception return can be for either a system call or
		 * another exception type; unfortunately the packet doesn't
		 * contain exception type information, so we cannot decide
		 * the exception type purely based on the exception return
		 * packet. Recording the exception number from the exception
		 * packet and reusing it for the exception return packet is
		 * not reliable either, since the trace can be discontinuous
		 * or the interrupt can be nested; in both cases the recorded
		 * exception number cannot be used for the exception return
		 * packet.
		 *
		 * For an exception return packet, we only need to know
		 * whether it is for a system call or for another type. The
		 * decision can therefore be deferred until the next packet,
		 * which contains the return address; based on the return
		 * address we can read back the previous instruction, check
		 * whether it is a system call instruction and then calibrate
		 * the sample flags as needed.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_INTERRUPT;
		break;
	case CS_ETM_EMPTY:
	default:
		break;
	}

	return 0;
}
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
{
	int ret = 0;
	size_t processed = 0;

	/*
	 * Packets are decoded and added to the decoder's packet queue
	 * until the decoder packet processing callback has requested that
	 * processing stops or there is nothing left in the buffer. Normal
	 * operations that stop processing are a timestamp packet or a full
	 * decoder buffer queue.
	 */
	ret = cs_etm_decoder__process_data_block(etmq->decoder,
						 etmq->offset,
						 &etmq->buf[etmq->buf_used],
						 etmq->buf_len,
						 &processed);
	if (ret)
		goto out;

	etmq->offset += processed;
	etmq->buf_used += processed;
	etmq->buf_len -= processed;

out:
	return ret;
}
static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
					 struct cs_etm_traceid_queue *tidq)
{
	int ret;
	struct cs_etm_packet_queue *packet_queue;

	packet_queue = &tidq->packet_queue;

	/* Process each packet in this chunk */
	while (1) {
		ret = cs_etm_decoder__get_packet(packet_queue,
						 tidq->packet);
		if (ret <= 0)
			/*
			 * Stop processing this chunk on
			 * end of data or error
			 */
			break;

		/*
		 * Since packet addresses are swapped in the packet handling
		 * within the switch() statement below, setting the sample
		 * flags must be done before the switch() statement so the
		 * address information can be used before the packets are
		 * swapped.
		 */
		ret = cs_etm__set_sample_flags(etmq, tidq);
		if (ret < 0)
			break;

		switch (tidq->packet->sample_type) {
		case CS_ETM_RANGE:
			/*
			 * If the packet contains an instruction
			 * range, generate instruction sequence
			 * events.
			 */
			cs_etm__sample(etmq, tidq);
			break;
		case CS_ETM_EXCEPTION:
		case CS_ETM_EXCEPTION_RET:
			/*
			 * If an exception packet arrives, make sure
			 * the previous instruction range packet is
			 * handled properly.
			 */
			cs_etm__exception(tidq);
			break;
		case CS_ETM_DISCONTINUITY:
			/*
			 * Discontinuity in trace, flush
			 * previous branch stack
			 */
			cs_etm__flush(etmq, tidq);
			break;
		case CS_ETM_EMPTY:
			/*
			 * Should not receive empty packet,
			 * report error.
			 */
			pr_err("CS ETM Trace: empty packet\n");
			return -EINVAL;
		default:
			break;
		}
	}

	return ret;
}
static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];

		/* Ignore return value */
		cs_etm__process_traceid_queue(etmq, tidq);

		/*
		 * Generate an instruction sample with the remaining
		 * branchstack entries.
		 */
		cs_etm__flush(etmq, tidq);
	}
}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
	if (!tidq)
		return -EINVAL;

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		err = cs_etm__get_data_block(etmq);
		if (err <= 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			err = cs_etm__decode_data_block(etmq);
			if (err)
				return err;

			/*
			 * Process each packet in this chunk; nothing to do if
			 * an error occurs other than hoping the next one will
			 * be better.
			 */
			err = cs_etm__process_traceid_queue(etmq, tidq);
		} while (etmq->buf_len);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__end_block(etmq, tidq);
	}

	return err;
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;
		struct cs_etm_traceid_queue *tidq;

		if (!etmq)
			continue;

		tidq = cs_etm__etmq_get_traceid_queue(etmq,
						CS_ETM_PER_THREAD_TRACEID);

		if (!tidq)
			continue;

		if ((tid == -1) || (tidq->tid == tid)) {
			cs_etm__set_pid_tid_cpu(etm, tidq);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}
static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
{
	int ret = 0;
	unsigned int cs_queue_nr, queue_nr;
	u8 trace_chan_id;
	u64 timestamp;
	struct auxtrace_queue *queue;
	struct cs_etm_queue *etmq;
	struct cs_etm_traceid_queue *tidq;

	while (1) {
		if (!etm->heap.heap_cnt)
			goto out;

		/* Take the entry at the top of the min heap */
		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
		queue_nr = TO_QUEUE_NR(cs_queue_nr);
		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
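		/*
		 * A heap entry's queue_nr packs the auxtrace queue number
		 * and the trace channel ID into a single value;
		 * TO_QUEUE_NR() and TO_TRACE_CHAN_ID() unpack it, and
		 * TO_CS_QUEUE_NR() further down re-packs it before the
		 * entry is re-inserted into the heap.
		 */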
		queue = &etm->queues.queue_array[queue_nr];
		etmq = queue->priv;

		/*
		 * Remove the top entry from the heap since we are about
		 * to process it.
		 */
		auxtrace_heap__pop(&etm->heap);

		tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
		if (!tidq) {
			/*
			 * No traceID queue has been allocated for this
			 * traceID, which means something somewhere went very
			 * wrong. No other choice than to simply exit.
			 */
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Packets associated with this timestamp are already in
		 * the etmq's traceID queue, so process them.
		 */
		ret = cs_etm__process_traceid_queue(etmq, tidq);
		if (ret < 0)
			goto out;

		/*
		 * Packets for this timestamp have been processed, time to
		 * move on to the next timestamp, fetching a new
		 * auxtrace_buffer if need be.
		 */
refetch:
		ret = cs_etm__get_data_block(etmq);
		if (ret < 0)
			goto out;

		/*
		 * No more auxtrace_buffers to process in this etmq, simply
		 * move on to another entry in the auxtrace_heap.
		 */
		if (!ret)
			continue;

		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		if (!timestamp) {
			/*
			 * Function cs_etm__decode_data_block() returns when
			 * there is no more trace to decode in the current
			 * auxtrace_buffer OR when a timestamp has been
			 * encountered on any of the traceID queues. Since we
			 * did not get a timestamp, there is no more trace to
			 * process in this auxtrace_buffer. As such, empty and
			 * flush all traceID queues.
			 */
			cs_etm__clear_all_traceid_queues(etmq);

			/* Fetch another auxtrace_buffer for this etmq */
			goto refetch;
		}

		/*
		 * Add to the min heap the timestamp for packets that have
		 * just been decoded. They will be processed and synthesized
		 * during the next call to cs_etm__process_traceid_queue() for
		 * this queue/traceID.
		 */
		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
	}

out:
	return ret;
}
static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
					union perf_event *event)
{
	struct thread *th;

	if (etm->timeless_decoding)
		return 0;

	/*
	 * Add the tid/pid to the log so that we can get a match when
	 * we get a contextID from the decoder.
	 */
	th = machine__findnew_thread(etm->machine,
				     event->itrace_start.pid,
				     event->itrace_start.tid);
	if (!th)
		return -ENOMEM;

	thread__put(th);

	return 0;
}
static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
					   union perf_event *event)
{
	struct thread *th;
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;

	/*
	 * Context switches in per-thread mode are irrelevant since perf
	 * will start/stop tracing as the process is scheduled.
	 */
	if (etm->timeless_decoding)
		return 0;

	/*
	 * SWITCH_IN events carry the next process to be switched out while
	 * SWITCH_OUT events carry the process to be switched in. As such
	 * we don't care about IN events.
	 */
	if (!out)
		return 0;

	/*
	 * Add the tid/pid to the log so that we can get a match when
	 * we get a contextID from the decoder.
	 */
	th = machine__findnew_thread(etm->machine,
				     event->context_switch.next_prev_pid,
				     event->context_switch.next_prev_tid);
	if (!th)
		return -ENOMEM;

	thread__put(th);

	return 0;
}
static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}
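	/* A sample time of 0 or (u64)-1 means there is no usable timestamp */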
	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (etm->timeless_decoding &&
	    event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid);

	if (event->header.type == PERF_RECORD_ITRACE_START)
		return cs_etm__process_itrace_start(etm, event);
	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		return cs_etm__process_switch_cpu_wide(etm, event);

	if (!etm->timeless_decoding &&
	    event->header.type == PERF_RECORD_AUX)
		return cs_etm__process_queues(etm);

	return 0;
}
static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}
static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct evsel *evsel;
	struct evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Cycle through the list of events and check whether any has the
	 * time bit set; decoding is timeless only if none do.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}
static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0]	= " Header version %llx\n",
	[CS_PMU_TYPE_CPUS]	= " PMU type/num cpus %llx\n",
	[CS_ETM_SNAPSHOT]	= " Snapshot %llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= " Magic number %llx\n",
	[CS_ETM_CPU]		= " CPU %lld\n",
	[CS_ETM_ETMCR]		= " ETMCR %llx\n",
	[CS_ETM_ETMTRACEIDR]	= " ETMTRACEIDR %llx\n",
	[CS_ETM_ETMCCER]	= " ETMCCER %llx\n",
	[CS_ETM_ETMIDR]		= " ETMIDR %llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= " Magic number %llx\n",
	[CS_ETM_CPU]		= " CPU %lld\n",
	[CS_ETMV4_TRCCONFIGR]	= " TRCCONFIGR %llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= " TRCTRACEIDR %llx\n",
	[CS_ETMV4_TRCIDR0]	= " TRCIDR0 %llx\n",
	[CS_ETMV4_TRCIDR1]	= " TRCIDR1 %llx\n",
	[CS_ETMV4_TRCIDR2]	= " TRCIDR2 %llx\n",
	[CS_ETMV4_TRCIDR8]	= " TRCIDR8 %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};

static void cs_etm__print_auxtrace_info(__u64 *val, int num)
{
	int i, j, cpu = 0;

	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
		if (val[i] == __perf_cs_etmv3_magic)
			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		else if (val[i] == __perf_cs_etmv4_magic)
			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		else
			/* failure... return */
			return;
	}
}
int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu;
	int err = 0, idx = -1;
	int i, j, k;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version '0' of the header */
	if (ptr[0] != 0)
		return -EINVAL;

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		hdr[i] = ptr[i];

	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				   0xffffffff);

	/*
	 * Create an RB tree for traceID-metadata tuples. Since the conversion
	 * has to be made for each packet that gets decoded, optimizing access
	 * in anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * The metadata is stored in the auxtrace_info section and encodes
	 * the configuration of the ARM embedded trace macrocell which is
	 * required by the trace decoder to properly decode the trace due
	 * to its highly compressed nature.
	 */
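	/*
	 * Layout sketch of the priv[] area walked below (as implied by the
	 * code, not a normative format description): CS_HEADER_VERSION_0_MAX
	 * global words come first, followed by one block per CPU whose first
	 * word is the magic number and whose total length is CS_ETM_PRIV_MAX
	 * or CS_ETMV4_PRIV_MAX words, depending on that magic.
	 */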
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETM_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETM_ETMTRACEIDR];
			i += CS_ETM_PRIV_MAX;
		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETMV4_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
			i += CS_ETMV4_PRIV_MAX;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, idx);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not be taken.
		 * Back out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}

		/* All good, associate the traceID with the metadata pointer */
		inode->priv = metadata[j];
	}

	/*
	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
	 * global metadata, and each cpu's metadata respectively.
	 * The following tests if the correct number of double words was
	 * present in the auxtrace info section.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));
	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
	session->auxtrace = &etm->auxtrace;
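	/*
	 * A placeholder "unknown" thread; the oversized pid/tid values are
	 * sentinels rather than real task ids.
	 */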
	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

	if (session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check @metadata[j], free(NULL) is supported */
	for (j = 0; j < num_cpu; j++)
		zfree(&metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);

	return err;
}