/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

/** Information about hardware counters */
struct sbi_pmu_hw_event {
	uint32_t counters;
	uint32_t start_idx;
	uint32_t end_idx;
	/*
	 * Event selector value used only for raw events. The event select
	 * value can be an event id or a selector value for a set of events
	 * encoded in a few bits. In the latter case, the bits used for
	 * encoding the events should be zeroed out in the select value.
	 */
	uint64_t select;
	/**
	 * The select_mask indicates which bits are encoded for the event(s).
	 */
	uint64_t select_mask;
};

/** Representation of a firmware event */
struct sbi_pmu_fw_event {
	/* Event associated with the particular counter */
	uint32_t event_idx;
	/* Current value of the counter */
	unsigned long curr_count;
	/* A flag indicating whether pmu event monitoring has been started */
	bool bStarted;
};

/* Information about PMU counters as per SBI specification */
union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;
		unsigned long width:6;
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;
	};
};

/* Mapping between event range and possible counters */
static struct sbi_pmu_hw_event hw_event_map[SBI_PMU_HW_EVENT_MAX] = {0};

/* Counter to enabled event mapping */
static uint32_t active_events[SBI_HARTMASK_MAX_BITS][SBI_PMU_HW_CTR_MAX + SBI_PMU_FW_CTR_MAX];

/* Contains all the information about firmware events */
static struct sbi_pmu_fw_event fw_event_map[SBI_HARTMASK_MAX_BITS][SBI_PMU_FW_EVENT_MAX] = {0};

/* Maximum number of hardware events available */
static uint32_t num_hw_events;
/* Maximum number of hardware counters available */
static uint32_t num_hw_ctrs;
/* Maximum number of counters available */
static uint32_t total_ctrs;

/* Helper macros to retrieve event idx and code type */
#define get_cidx_type(x) ((x & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16)
#define get_cidx_code(x) (x & SBI_PMU_EVENT_IDX_CODE_MASK)

/**
 * Perform a sanity check on the event & counter mappings by checking whether
 * the event ranges overlap.
 * @param evtA Pointer to the existing hw event structure
 * @param evtB Pointer to the new hw event structure
 *
 * Return FALSE if the ranges do not overlap, TRUE otherwise
 */
static bool pmu_event_range_overlap(struct sbi_pmu_hw_event *evtA,
				    struct sbi_pmu_hw_event *evtB)
{
	/* Check if the range of events overlaps with a previous entry */
	if (((evtA->end_idx < evtB->start_idx) && (evtA->end_idx < evtB->end_idx)) ||
	    ((evtA->start_idx > evtB->start_idx) && (evtA->start_idx > evtB->end_idx)))
		return FALSE;
	return TRUE;
}

static bool pmu_event_select_overlap(struct sbi_pmu_hw_event *evt,
				     uint64_t select_val, uint64_t select_mask)
{
	if ((evt->select == select_val) && (evt->select_mask == select_mask))
		return TRUE;
	return FALSE;
}

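/*
 * Validate a counter index against the current hart's active event mapping.
 * On success, return the event type (SBI_PMU_EVENT_TYPE_*) and pass back the
 * event code via @event_idx_code; otherwise return a negative error.
 */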
static int pmu_ctr_validate(uint32_t cidx, uint32_t *event_idx_code)
{
	uint32_t event_idx_val;
	uint32_t event_idx_type;
	u32 hartid = current_hartid();

	/* Check the counter index before using it to index active_events */
	if (cidx >= total_ctrs)
		return SBI_EINVAL;

	event_idx_val = active_events[hartid][cidx];
	if (event_idx_val == SBI_PMU_EVENT_IDX_INVALID)
		return SBI_EINVAL;

	event_idx_type = get_cidx_type(event_idx_val);
	if (event_idx_type >= SBI_PMU_EVENT_TYPE_MAX)
		return SBI_EINVAL;

	*event_idx_code = get_cidx_code(event_idx_val);

	return event_idx_type;
}

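/* Read the current value of a firmware counter for the calling hart */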
static int pmu_ctr_read_fw(uint32_t cidx, unsigned long *cval,
			   uint32_t fw_evt_code)
{
	u32 hartid = current_hartid();
	struct sbi_pmu_fw_event fevent;

	fevent = fw_event_map[hartid][fw_evt_code];
	*cval = fevent.curr_count;

	return 0;
}

/* A hardware counter read is added here for completeness and future use */
static int pmu_ctr_read_hw(uint32_t cidx, uint64_t *cval)
{
	/* Check for invalid hw counter read requests */
	if (unlikely(cidx == 1))
		return SBI_EINVAL;

#if __riscv_xlen == 32
	uint32_t temp, temph = 0;

	temp = csr_read_num(CSR_MCYCLE + cidx);
	temph = csr_read_num(CSR_MCYCLEH + cidx);
	*cval = ((uint64_t)temph << 32) | temp;
#else
	*cval = csr_read_num(CSR_MCYCLE + cidx);
#endif

	return 0;
}

int sbi_pmu_ctr_read(uint32_t cidx, unsigned long *cval)
{
	int event_idx_type;
	uint32_t event_code;
	uint64_t cval64;

	event_idx_type = pmu_ctr_validate(cidx, &event_code);
	if (event_idx_type < 0)
		return SBI_EINVAL;
	else if (event_idx_type == SBI_PMU_EVENT_TYPE_FW)
		pmu_ctr_read_fw(cidx, cval, event_code);
	else {
		pmu_ctr_read_hw(cidx, &cval64);
		/* Return the low XLEN bits of the hardware counter value */
		*cval = cval64;
	}

	return 0;
}

static int pmu_add_hw_event_map(u32 eidx_start, u32 eidx_end, u32 cmap,
				uint64_t select, uint64_t select_mask)
{
	int i = 0;
	bool is_overlap;
	struct sbi_pmu_hw_event *event = &hw_event_map[num_hw_events];

	/* The fixed counters (cycle, time, instret) are reserved by the priv spec */
	if (eidx_start > SBI_PMU_HW_INSTRUCTIONS && (cmap & SBI_PMU_FIXED_CTR_MASK))
		return SBI_EDENIED;

	if (num_hw_events >= SBI_PMU_HW_EVENT_MAX - 1) {
		sbi_printf("Can not handle more than %d perf events\n",
			   SBI_PMU_HW_EVENT_MAX);
		return SBI_EFAIL;
	}

	event->start_idx = eidx_start;
	event->end_idx = eidx_end;

	/* Sanity check */
	for (i = 0; i < num_hw_events; i++) {
		if (eidx_start == SBI_PMU_EVENT_RAW_IDX)
			/* All raw events have the same event idx. Just do a sanity check on select */
			is_overlap = pmu_event_select_overlap(&hw_event_map[i],
							      select, select_mask);
		else
			is_overlap = pmu_event_range_overlap(&hw_event_map[i], event);
		if (is_overlap)
			goto reset_event;
	}

	event->select_mask = select_mask;
	event->counters = cmap;
	event->select = select;
	num_hw_events++;

	return 0;

reset_event:
	event->start_idx = 0;
	event->end_idx = 0;
	return SBI_EINVAL;
}

/**
 * Logical counter ids are assigned to hardware counters consecutively.
 * E.g. counter0 must count MCYCLE and counter2 must count minstret. Similarly,
 * counterX maps to mhpmcounterX.
 */
int sbi_pmu_add_hw_event_counter_map(u32 eidx_start, u32 eidx_end, u32 cmap)
{
	if ((eidx_start > eidx_end) || eidx_start == SBI_PMU_EVENT_RAW_IDX ||
	    eidx_end == SBI_PMU_EVENT_RAW_IDX)
		return SBI_EINVAL;

	return pmu_add_hw_event_map(eidx_start, eidx_end, cmap, 0, 0);
}

int sbi_pmu_add_raw_event_counter_map(uint64_t select, uint64_t select_mask, u32 cmap)
{
	return pmu_add_hw_event_map(SBI_PMU_EVENT_RAW_IDX,
				    SBI_PMU_EVENT_RAW_IDX, cmap, select, select_mask);
}

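/*
 * Re-arm the counter overflow interrupt for a programmable hardware counter
 * by clearing the OF bit in the corresponding mhpmevent CSR (Sscofpmf).
 */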
static int pmu_ctr_enable_irq_hw(int ctr_idx)
{
	unsigned long mhpmevent_csr;
	unsigned long mhpmevent_curr;
	unsigned long mip_val;
	unsigned long of_mask;

	if (ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

#if __riscv_xlen == 32
	mhpmevent_csr = CSR_MHPMEVENT3H + ctr_idx - 3;
	of_mask = ~MHPMEVENTH_OF;
#else
	mhpmevent_csr = CSR_MHPMEVENT3 + ctr_idx - 3;
	of_mask = ~MHPMEVENT_OF;
#endif

	mhpmevent_curr = csr_read_num(mhpmevent_csr);
	mip_val = csr_read(CSR_MIP);
	/**
	 * Clear out the OF bit so that the next interrupt can be enabled.
	 * This should be done only when the corresponding overflow interrupt
	 * bit is cleared. That indicates that software has already handled the
	 * previous interrupts or the hardware is yet to set an overflow
	 * interrupt. Otherwise, there is a race condition where we may clear
	 * the bit while software has yet to handle the interrupt.
	 */
	if (!(mip_val & MIP_LCOFIP)) {
		mhpmevent_curr &= of_mask;
		csr_write_num(mhpmevent_csr, mhpmevent_curr);
	}

	return 0;
}

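/* Program the initial value of a hardware counter (mcycle/minstret/mhpmcounterX) */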
static void pmu_ctr_write_hw(uint32_t cidx, uint64_t ival)
{
#if __riscv_xlen == 32
	csr_write_num(CSR_MCYCLE + cidx, 0);
	csr_write_num(CSR_MCYCLE + cidx, ival & 0xFFFFFFFF);
	csr_write_num(CSR_MCYCLEH + cidx, ival >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MCYCLE + cidx, ival);
#endif
}

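/*
 * Start a hardware counter: optionally program its initial value and clear
 * its bit in mcountinhibit (when implemented) so that it starts counting.
 */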
static int pmu_ctr_start_hw(uint32_t cidx, uint64_t ival, bool ival_update)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	unsigned long mctr_inhbt;

	/* Make sure the counter index lies within the range and is not the TM counter */
	if (cidx > num_hw_ctrs || cidx == 1)
		return SBI_EINVAL;

	if (!sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTINHIBIT))
		goto skip_inhibit_update;

	/*
	 * Some hardware may not support mcountinhibit, but perf stat
	 * can still work if supervisor mode programs the initial value.
	 */
	mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);
	if (!__test_bit(cidx, &mctr_inhbt))
		return SBI_EALREADY_STARTED;

	__clear_bit(cidx, &mctr_inhbt);

	if (sbi_hart_has_feature(scratch, SBI_HART_HAS_SSCOFPMF))
		pmu_ctr_enable_irq_hw(cidx);
	csr_write(CSR_MCOUNTINHIBIT, mctr_inhbt);

skip_inhibit_update:
	if (ival_update)
		pmu_ctr_write_hw(cidx, ival);

	return 0;
}

static int pmu_ctr_start_fw(uint32_t cidx, uint32_t fw_evt_code,
			    uint64_t ival, bool ival_update)
{
	u32 hartid = current_hartid();
	struct sbi_pmu_fw_event *fevent;

	fevent = &fw_event_map[hartid][fw_evt_code];
	if (ival_update)
		fevent->curr_count = ival;
	fevent->bStarted = TRUE;

	return 0;
}

int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
		      unsigned long flags, uint64_t ival)
{
	int event_idx_type;
	uint32_t event_code;
	unsigned long ctr_mask = cmask << cbase;
	int ret = SBI_EINVAL;
	bool bUpdate = FALSE;

	if (__fls(ctr_mask) >= total_ctrs)
		return ret;

	if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
		bUpdate = TRUE;

	for_each_set_bit_from(cbase, &ctr_mask, total_ctrs) {
		event_idx_type = pmu_ctr_validate(cbase, &event_code);
		if (event_idx_type < 0)
			/* Continue the start operation for other counters */
			continue;
		else if (event_idx_type == SBI_PMU_EVENT_TYPE_FW)
			ret = pmu_ctr_start_fw(cbase, event_code, ival, bUpdate);
		else
			ret = pmu_ctr_start_hw(cbase, ival, bUpdate);
	}

	return ret;
}

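/* Stop a hardware counter by setting its bit in mcountinhibit (when implemented) */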
static int pmu_ctr_stop_hw(uint32_t cidx)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	unsigned long mctr_inhbt;

	if (!sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTINHIBIT))
		return 0;

	mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);

	/* Make sure the counter index lies within the range and is not the TM counter */
	if (cidx > num_hw_ctrs || cidx == 1)
		return SBI_EINVAL;

	if (!__test_bit(cidx, &mctr_inhbt)) {
		__set_bit(cidx, &mctr_inhbt);
		csr_write(CSR_MCOUNTINHIBIT, mctr_inhbt);
		return 0;
	} else
		return SBI_EALREADY_STOPPED;
}

static int pmu_ctr_stop_fw(uint32_t cidx, uint32_t fw_evt_code)
{
	u32 hartid = current_hartid();

	fw_event_map[hartid][fw_evt_code].bStarted = FALSE;

	return 0;
}

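/* Clear the event selector of a programmable counter so it no longer counts any event */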
static int pmu_reset_hw_mhpmevent(int ctr_idx)
{
	if (ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;
#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, 0);
	csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3, 0);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, 0);
#endif

	return 0;
}

int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
		     unsigned long flag)
{
	u32 hartid = current_hartid();
	int ret = SBI_EINVAL;
	int event_idx_type;
	uint32_t event_code;
	unsigned long ctr_mask = cmask << cbase;

	if (__fls(ctr_mask) >= total_ctrs)
		return SBI_EINVAL;

	for_each_set_bit_from(cbase, &ctr_mask, total_ctrs) {
		event_idx_type = pmu_ctr_validate(cbase, &event_code);
		if (event_idx_type < 0)
			/* Continue the stop operation for other counters */
			continue;
		else if (event_idx_type == SBI_PMU_EVENT_TYPE_FW)
			ret = pmu_ctr_stop_fw(cbase, event_code);
		else
			ret = pmu_ctr_stop_hw(cbase);

		if (flag & SBI_PMU_STOP_FLAG_RESET) {
			active_events[hartid][cbase] = SBI_PMU_EVENT_IDX_INVALID;
			pmu_reset_hw_mhpmevent(cbase);
		}
	}

	return ret;
}

static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
	if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
		*mhpmevent_val |= MHPMEVENT_VUINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
		*mhpmevent_val |= MHPMEVENT_VSINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
		*mhpmevent_val |= MHPMEVENT_UINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
		*mhpmevent_val |= MHPMEVENT_SINH;
}

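/*
 * Program the mhpmevent CSR of a counter with the platform-translated event
 * encoding, along with the overflow and privilege-mode inhibit bits.
 */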
static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
				   unsigned long flags, unsigned long eindex,
				   uint64_t data)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	uint64_t mhpmevent_val;

	/* Get the final mhpmevent value to be written from the platform */
	mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);

	if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

	/* Always clear the OF bit and inhibit counting of events in M-mode */
	if (sbi_hart_has_feature(scratch, SBI_HART_HAS_SSCOFPMF))
		mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) | MHPMEVENT_MINH;

	/* Update the inhibit flags based on the inhibit flags received from supervisor */
	pmu_update_inhibit_flags(flags, &mhpmevent_val);

#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
	csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3, mhpmevent_val >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif

	return 0;
}

static int pmu_ctr_find_fixed_fw(unsigned long evt_idx_code)
{
	/* Non-programmable counters are always enabled. No need to do a lookup */
	if (evt_idx_code == SBI_PMU_HW_CPU_CYCLES)
		return 0;
	else if (evt_idx_code == SBI_PMU_HW_INSTRUCTIONS)
		return 2;
	else
		return SBI_EINVAL;
}

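/*
 * Find a free hardware counter that can count the requested event and
 * program its mhpmevent CSR. Returns the counter index on success.
 */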
static int pmu_ctr_find_hw(unsigned long cbase, unsigned long cmask, unsigned long flags,
			   unsigned long event_idx, uint64_t data)
{
	unsigned long ctr_mask;
	int i, ret = 0, fixed_ctr, ctr_idx = SBI_ENOTSUPP;
	struct sbi_pmu_hw_event *temp;
	unsigned long mctr_inhbt = 0;
	u32 hartid = current_hartid();
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	if (cbase > num_hw_ctrs)
		return SBI_EINVAL;

	/**
	 * If Sscofpmf is present, try to find a programmable counter for
	 * cycle/instret as well.
	 */
	fixed_ctr = pmu_ctr_find_fixed_fw(event_idx);
	if (fixed_ctr >= 0 &&
	    !sbi_hart_has_feature(scratch, SBI_HART_HAS_SSCOFPMF))
		return fixed_ctr;

	if (sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTINHIBIT))
		mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);
	for (i = 0; i < num_hw_events; i++) {
		temp = &hw_event_map[i];
		/* Skip this mapping if the event lies outside its range */
		if (event_idx < temp->start_idx || event_idx > temp->end_idx)
			continue;

		/* For raw events, event data is used as the select value */
		if (event_idx == SBI_PMU_EVENT_RAW_IDX) {
			uint64_t select_mask = temp->select_mask;

			/* The non-event map bits of data should match the selector */
			if (temp->select != (data & select_mask))
				continue;
		}

		/* Fixed counters should not be part of the search */
		ctr_mask = temp->counters & (cmask << cbase) &
			   (~SBI_PMU_FIXED_CTR_MASK);
		for_each_set_bit_from(cbase, &ctr_mask, SBI_PMU_HW_CTR_MAX) {
			/**
			 * Some platforms may not support mcountinhibit.
			 * Checking active_events is enough for them.
			 */
			if (active_events[hartid][cbase] != SBI_PMU_EVENT_IDX_INVALID)
				continue;
			/* If mcountinhibit is supported, the counter must currently be inhibited */
			if ((sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTINHIBIT)) &&
			    !__test_bit(cbase, &mctr_inhbt))
				continue;
			/* We found a valid counter that is not started yet */
			ctr_idx = cbase;
		}
	}

	if (ctr_idx == SBI_ENOTSUPP) {
		/**
		 * We can't find any programmable counters for cycle/instret.
		 * Return the fixed counter as they are mandatory anyway.
		 */
		if (fixed_ctr >= 0)
			return fixed_ctr;
		else
			return SBI_EFAIL;
	}
	ret = pmu_update_hw_mhpmevent(temp, ctr_idx, flags, event_idx, data);

	if (!ret)
		ret = ctr_idx;

	return ret;
}

/**
 * Any firmware counter can map to any firmware event.
 * Thus, select the first available fw counter after sanity
 * check.
 */
static int pmu_ctr_find_fw(unsigned long cbase, unsigned long cmask, u32 hartid)
{
	int i = 0;
	int fw_base;
	unsigned long ctr_mask = cmask << cbase;

	if (cbase <= num_hw_ctrs)
		fw_base = num_hw_ctrs + 1;
	else
		fw_base = cbase;

	for (i = fw_base; i < total_ctrs; i++)
		if ((active_events[hartid][i] == SBI_PMU_EVENT_IDX_INVALID) &&
		    ((1UL << i) & ctr_mask))
			return i;

	return SBI_ENOTSUPP;
}

int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
			  unsigned long flags, unsigned long event_idx,
			  uint64_t event_data)
{
	int ctr_idx = SBI_ENOTSUPP;
	u32 hartid = current_hartid();
	int event_type = get_cidx_type(event_idx);
	struct sbi_pmu_fw_event *fevent;
	uint32_t fw_evt_code;
	unsigned long tmp = cidx_mask << cidx_base;

	/* Do a basic sanity check of counter base & mask */
	if (__fls(tmp) >= total_ctrs || event_type >= SBI_PMU_EVENT_TYPE_MAX)
		return SBI_EINVAL;

	if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
		/*
		 * The caller wants to skip the match because it already knows
		 * the counter idx for the given event. Verify that the counter
		 * idx is still valid.
		 */
		if (active_events[hartid][cidx_base] == SBI_PMU_EVENT_IDX_INVALID)
			return SBI_EINVAL;
		ctr_idx = cidx_base;
		goto skip_match;
	}

	if (event_type == SBI_PMU_EVENT_TYPE_FW) {
		/* Any firmware counter can be used to track any firmware event */
		ctr_idx = pmu_ctr_find_fw(cidx_base, cidx_mask, hartid);
	} else {
		ctr_idx = pmu_ctr_find_hw(cidx_base, cidx_mask, flags, event_idx,
					  event_data);
	}

	if (ctr_idx < 0)
		return SBI_ENOTSUPP;

	active_events[hartid][ctr_idx] = event_idx;
skip_match:
	if (event_type == SBI_PMU_EVENT_TYPE_HW) {
		if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
			pmu_ctr_write_hw(ctr_idx, 0);
		if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
			pmu_ctr_start_hw(ctr_idx, 0, false);
	} else if (event_type == SBI_PMU_EVENT_TYPE_FW) {
		fw_evt_code = get_cidx_code(event_idx);
		fevent = &fw_event_map[hartid][fw_evt_code];
		if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
			fevent->curr_count = 0;
		if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
			fevent->bStarted = TRUE;
	}

	return ctr_idx;
}

inline int sbi_pmu_ctr_incr_fw(enum sbi_pmu_fw_event_code_id fw_id)
{
	u32 hartid = current_hartid();
	struct sbi_pmu_fw_event *fevent;

	if (unlikely(fw_id >= SBI_PMU_FW_MAX))
		return SBI_EINVAL;

	fevent = &fw_event_map[hartid][fw_id];

	/* PMU counters will only be enabled during performance debugging */
	if (unlikely(fevent->bStarted))
		fevent->curr_count++;

	return 0;
}

unsigned long sbi_pmu_num_ctr(void)
{
	return (num_hw_ctrs + SBI_PMU_FW_CTR_MAX);
}

int sbi_pmu_ctr_get_info(uint32_t cidx, unsigned long *ctr_info)
{
	union sbi_pmu_ctr_info cinfo = {0};
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	/* Sanity check. Counter1 is not mapped at all */
	if (cidx >= total_ctrs || cidx == 1)
		return SBI_EINVAL;

	/* We have 31 HW counters with 31 being the last index (MHPMCOUNTER31) */
	if (cidx <= num_hw_ctrs) {
		cinfo.type = SBI_PMU_CTR_TYPE_HW;
		cinfo.csr = CSR_CYCLE + cidx;
		/* mcycle & minstret are always 64 bit */
		if (cidx == 0 || cidx == 2)
			cinfo.width = 63;
		else
			cinfo.width = sbi_hart_mhpm_bits(scratch) - 1;
	} else {
		/* It's a firmware counter */
		cinfo.type = SBI_PMU_CTR_TYPE_FW;
		/* Firmware counters are XLEN bits wide */
		cinfo.width = BITS_PER_LONG - 1;
	}

	*ctr_info = cinfo.value;

	return 0;
}

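/* Reset the counter-to-event mapping and firmware event state for a hart */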
static void pmu_reset_event_map(u32 hartid)
{
	int j;

	/* Initialize the counter to event mapping table */
	for (j = 3; j < total_ctrs; j++)
		active_events[hartid][j] = SBI_PMU_EVENT_IDX_INVALID;
	for (j = 0; j < SBI_PMU_FW_CTR_MAX; j++)
		sbi_memset(&fw_event_map[hartid][j], 0,
			   sizeof(struct sbi_pmu_fw_event));
}

void sbi_pmu_exit(struct sbi_scratch *scratch)
{
	u32 hartid = current_hartid();

	if (sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTINHIBIT))
		csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8);
	csr_write(CSR_MCOUNTEREN, -1);
	pmu_reset_event_map(hartid);
}

int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
{
	const struct sbi_platform *plat;
	u32 hartid = current_hartid();

	if (cold_boot) {
		plat = sbi_platform_ptr(scratch);
		/* Initialize hw pmu events */
		sbi_platform_pmu_init(plat);

		/* mcycle & minstret are always available */
		num_hw_ctrs = sbi_hart_mhpm_count(scratch) + 2;
		total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
	}

	pmu_reset_event_map(hartid);

	/* The first three counters are fixed by the priv spec and we enable them by default */
	active_events[hartid][0] = SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_OFFSET |
				   SBI_PMU_HW_CPU_CYCLES;
	active_events[hartid][1] = SBI_PMU_EVENT_IDX_INVALID;
	active_events[hartid][2] = SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_OFFSET |
				   SBI_PMU_HW_INSTRUCTIONS;

	return 0;
}