/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

/** Information about hardware counters */
struct sbi_pmu_hw_event {
	uint32_t counters;
	uint32_t start_idx;
	uint32_t end_idx;
	/*
	 * Event selector value used only for raw events. The event select
	 * value can be an event id or a selector value for a set of events
	 * encoded in a few bits. In the latter case, the bits used for
	 * encoding the events should be zeroed out in the select value.
	 */
	uint64_t select;
	/**
	 * The select_mask indicates which bits are encoded for the event(s).
	 */
	uint64_t select_mask;
};

/* Information about PMU counters as per SBI specification */
union sbi_pmu_ctr_info {
	unsigned long value;
	struct {
		unsigned long csr:12;
		unsigned long width:6;
#if __riscv_xlen == 32
		unsigned long reserved:13;
#else
		unsigned long reserved:45;
#endif
		unsigned long type:1;
	};
};
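
/*
 * Example encoding: the SBI counter_info word packs the whole counter
 * description into a single unsigned long. On a typical implementation the
 * cycle counter would be reported as
 *
 *	cinfo.csr   = CSR_CYCLE;		// 0xC00
 *	cinfo.width = 63;			// MSB index of a 64-bit counter
 *	cinfo.type  = SBI_PMU_CTR_TYPE_HW;
 *
 * while firmware counters have no CSR and report SBI_PMU_CTR_TYPE_FW.
 * See sbi_pmu_ctr_get_info() below for how these fields are filled in.
 */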

/* Platform specific PMU device */
static const struct sbi_pmu_device *pmu_dev = NULL;

/* Mapping between event range and possible counters */
static struct sbi_pmu_hw_event hw_event_map[SBI_PMU_HW_EVENT_MAX] = {0};

/* Counter to enabled event mapping */
static uint32_t active_events[SBI_HARTMASK_MAX_BITS][SBI_PMU_HW_CTR_MAX + SBI_PMU_FW_CTR_MAX];

/* Bitmap of firmware counters started on each HART */
#if SBI_PMU_FW_CTR_MAX >= BITS_PER_LONG
#error "Can't handle firmware counters beyond BITS_PER_LONG"
#endif
static unsigned long fw_counters_started[SBI_HARTMASK_MAX_BITS];

/*
 * Counter values for SBI firmware events and event codes for platform
 * firmware events. Both are mutually exclusive and hence can optimally share
 * the same memory.
 */
static uint64_t fw_counters_data[SBI_HARTMASK_MAX_BITS][SBI_PMU_FW_CTR_MAX] = {0};

/* Maximum number of hardware events available */
static uint32_t num_hw_events;
/* Maximum number of hardware counters available */
static uint32_t num_hw_ctrs;
/* Maximum number of counters available */
static uint32_t total_ctrs;

/* Helper macros to retrieve event idx and code type */
#define get_cidx_type(x) ((x & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16)
#define get_cidx_code(x) (x & SBI_PMU_EVENT_IDX_CODE_MASK)
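
/*
 * An SBI event index is a 20-bit value: bits [19:16] hold the event type and
 * bits [15:0] hold the event code, which is exactly what the two macros above
 * extract. For instance, the generic hardware event SBI_PMU_HW_CPU_CYCLES
 * (type SBI_PMU_EVENT_TYPE_HW = 0, code 1) is encoded as event_idx 0x00001,
 * while firmware events use type SBI_PMU_EVENT_TYPE_FW (0xf), so a firmware
 * event with code 6 is encoded as 0xf0006.
 */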

/**
 * Perform a sanity check on event & counter mappings with an event range overlap check
 * @param evtA Pointer to the existing hw event structure
 * @param evtB Pointer to the new hw event structure
 *
 * Return false if the ranges don't overlap, true otherwise
 */
static bool pmu_event_range_overlap(struct sbi_pmu_hw_event *evtA,
				    struct sbi_pmu_hw_event *evtB)
{
	/* check if the range of events overlap with a previous entry */
	if (((evtA->end_idx < evtB->start_idx) && (evtA->end_idx < evtB->end_idx)) ||
	    ((evtA->start_idx > evtB->start_idx) && (evtA->start_idx > evtB->end_idx)))
		return false;
	return true;
}

static bool pmu_event_select_overlap(struct sbi_pmu_hw_event *evt,
				     uint64_t select_val, uint64_t select_mask)
{
	if ((evt->select == select_val) && (evt->select_mask == select_mask))
		return true;

	return false;
}

static int pmu_event_validate(unsigned long event_idx, uint64_t edata)
{
	uint32_t event_idx_type = get_cidx_type(event_idx);
	uint32_t event_idx_code = get_cidx_code(event_idx);
	uint32_t event_idx_code_max = -1;
	uint32_t cache_ops_result, cache_ops_id, cache_id;
	u32 hartid = current_hartid();

	switch (event_idx_type) {
	case SBI_PMU_EVENT_TYPE_HW:
		event_idx_code_max = SBI_PMU_HW_GENERAL_MAX;
		break;
	case SBI_PMU_EVENT_TYPE_FW:
		if ((event_idx_code >= SBI_PMU_FW_MAX &&
		     event_idx_code <= SBI_PMU_FW_RESERVED_MAX) ||
		     event_idx_code > SBI_PMU_FW_PLATFORM)
			return SBI_EINVAL;

		if (SBI_PMU_FW_PLATFORM == event_idx_code &&
		    pmu_dev && pmu_dev->fw_event_validate_encoding)
			return pmu_dev->fw_event_validate_encoding(hartid,
								   edata);
		else
			event_idx_code_max = SBI_PMU_FW_MAX;
		break;
	case SBI_PMU_EVENT_TYPE_HW_CACHE:
		cache_ops_result = event_idx_code &
				   SBI_PMU_EVENT_HW_CACHE_OPS_RESULT;
		cache_ops_id = (event_idx_code &
				SBI_PMU_EVENT_HW_CACHE_OPS_ID_MASK) >>
				SBI_PMU_EVENT_HW_CACHE_OPS_ID_OFFSET;
		cache_id = (event_idx_code &
			    SBI_PMU_EVENT_HW_CACHE_ID_MASK) >>
			    SBI_PMU_EVENT_HW_CACHE_ID_OFFSET;
		if ((cache_ops_result < SBI_PMU_HW_CACHE_RESULT_MAX) &&
		    (cache_ops_id < SBI_PMU_HW_CACHE_OP_MAX) &&
		    (cache_id < SBI_PMU_HW_CACHE_MAX))
			return event_idx_type;
		else
			return SBI_EINVAL;
		break;
	case SBI_PMU_EVENT_TYPE_HW_RAW:
		event_idx_code_max = 1; // event_idx.code should be zero
		break;
	default:
		return SBI_EINVAL;
	}

	if (event_idx_code < event_idx_code_max)
		return event_idx_type;

	return SBI_EINVAL;
}

static int pmu_ctr_validate(uint32_t cidx, uint32_t *event_idx_code)
{
	uint32_t event_idx_val;
	uint32_t event_idx_type;
	u32 hartid = current_hartid();

	if (cidx >= total_ctrs)
		return SBI_EINVAL;

	event_idx_val = active_events[hartid][cidx];
	event_idx_type = get_cidx_type(event_idx_val);
	if (event_idx_val == SBI_PMU_EVENT_IDX_INVALID ||
	    event_idx_type >= SBI_PMU_EVENT_TYPE_MAX)
		return SBI_EINVAL;

	*event_idx_code = get_cidx_code(event_idx_val);

	return event_idx_type;
}
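
/*
 * Firmware counters occupy the logical counter indices
 * [num_hw_ctrs, total_ctrs); the per-hart storage and the platform PMU
 * device callbacks are indexed by (cidx - num_hw_ctrs). Reads of a platform
 * firmware event are forwarded to the device, everything else comes from
 * fw_counters_data.
 */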
int sbi_pmu_ctr_fw_read(uint32_t cidx, uint64_t *cval)
{
	int event_idx_type;
	uint32_t event_code;
	u32 hartid = current_hartid();

	event_idx_type = pmu_ctr_validate(cidx, &event_code);
	if (event_idx_type != SBI_PMU_EVENT_TYPE_FW)
		return SBI_EINVAL;

	if ((event_code >= SBI_PMU_FW_MAX &&
	     event_code <= SBI_PMU_FW_RESERVED_MAX) ||
	     event_code > SBI_PMU_FW_PLATFORM)
		return SBI_EINVAL;

	if (SBI_PMU_FW_PLATFORM == event_code) {
		if (pmu_dev && pmu_dev->fw_counter_read_value)
			*cval = pmu_dev->fw_counter_read_value(hartid,
							       cidx -
							       num_hw_ctrs);
		else
			*cval = 0;
	} else
		*cval = fw_counters_data[hartid][cidx - num_hw_ctrs];

	return 0;
}
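
/*
 * Register one entry of the event to counter mapping table. The counter map
 * 'cmap' is trimmed to the counters that actually exist: with 'hw_ctr_avail'
 * programmable counters the mask below keeps bits [0, hw_ctr_avail + 2],
 * i.e. the three fixed counters plus mhpmcounter3..mhpmcounter(hw_ctr_avail + 2).
 * For example, with 16 programmable counters the mask is 0x7ffff.
 */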
static int pmu_add_hw_event_map(u32 eidx_start, u32 eidx_end, u32 cmap,
				uint64_t select, uint64_t select_mask)
{
	int i = 0;
	bool is_overlap;
	struct sbi_pmu_hw_event *event = &hw_event_map[num_hw_events];
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	int hw_ctr_avail = sbi_hart_mhpm_count(scratch);
	uint32_t ctr_avail_mask = ((uint32_t)(~0) >> (32 - (hw_ctr_avail + 3)));

	/* The first three counters are fixed by the priv spec */
	if (eidx_start > SBI_PMU_HW_INSTRUCTIONS && (cmap & SBI_PMU_FIXED_CTR_MASK))
		return SBI_EDENIED;

	if (num_hw_events >= SBI_PMU_HW_EVENT_MAX - 1) {
		sbi_printf("Can not handle more than %d perf events\n",
			   SBI_PMU_HW_EVENT_MAX);
		return SBI_EFAIL;
	}

	event->start_idx = eidx_start;
	event->end_idx = eidx_end;

	/* Sanity check */
	for (i = 0; i < num_hw_events; i++) {
		if (eidx_start == SBI_PMU_EVENT_RAW_IDX)
			/* All raw events have the same event idx. Just do a sanity check on select */
			is_overlap = pmu_event_select_overlap(&hw_event_map[i],
							      select, select_mask);
		else
			is_overlap = pmu_event_range_overlap(&hw_event_map[i], event);
		if (is_overlap)
			goto reset_event;
	}

	event->select_mask = select_mask;
	/* Map only the counters that are available in the hardware */
	event->counters = cmap & ctr_avail_mask;
	event->select = select;
	num_hw_events++;

	return 0;

reset_event:
	event->start_idx = 0;
	event->end_idx = 0;
	return SBI_EINVAL;
}

/**
 * Logical counter ids are assigned to hardware counters consecutively.
 * E.g. counter0 must count MCYCLE where counter2 must count minstret.
 * Similarly, counterX will count mhpmcounterX.
 */
int sbi_pmu_add_hw_event_counter_map(u32 eidx_start, u32 eidx_end, u32 cmap)
{
	if ((eidx_start > eidx_end) || eidx_start == SBI_PMU_EVENT_RAW_IDX ||
	    eidx_end == SBI_PMU_EVENT_RAW_IDX)
		return SBI_EINVAL;

	return pmu_add_hw_event_map(eidx_start, eidx_end, cmap, 0, 0);
}

int sbi_pmu_add_raw_event_counter_map(uint64_t select, uint64_t select_mask, u32 cmap)
{
	return pmu_add_hw_event_map(SBI_PMU_EVENT_RAW_IDX,
				    SBI_PMU_EVENT_RAW_IDX, cmap, select, select_mask);
}
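
/*
 * A platform's PMU setup typically registers these mappings at cold boot,
 * for example (illustrative values only):
 *
 *	// Let mhpmcounter3 count the generic cache-miss event
 *	sbi_pmu_add_hw_event_counter_map(SBI_PMU_HW_CACHE_MISSES,
 *					 SBI_PMU_HW_CACHE_MISSES, 1 << 3);
 *	// Allow raw event 0x10 on any programmable counter 3..15
 *	sbi_pmu_add_raw_event_counter_map(0x10, ~0ULL, 0xfff8);
 *
 * The generic platform derives equivalent calls from the device tree in its
 * PMU setup code.
 */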

static int pmu_ctr_enable_irq_hw(int ctr_idx)
{
	unsigned long mhpmevent_csr;
	unsigned long mhpmevent_curr;
	unsigned long mip_val;
	unsigned long of_mask;

	if (ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

#if __riscv_xlen == 32
	mhpmevent_csr = CSR_MHPMEVENT3H + ctr_idx - 3;
	of_mask = (uint32_t)~MHPMEVENTH_OF;
#else
	mhpmevent_csr = CSR_MHPMEVENT3 + ctr_idx - 3;
	of_mask = ~MHPMEVENT_OF;
#endif

	mhpmevent_curr = csr_read_num(mhpmevent_csr);
	mip_val = csr_read(CSR_MIP);
	/**
	 * Clear out the OF bit so that the next interrupt can be enabled.
	 * This should be done only when the corresponding overflow interrupt
	 * bit is cleared. That indicates that software has already handled
	 * the previous interrupts or the hardware is yet to set an overflow
	 * interrupt. Otherwise, there will be a race condition where we may
	 * clear the bit while the software has yet to handle the interrupt.
	 */
	if (!(mip_val & MIP_LCOFIP)) {
		mhpmevent_curr &= of_mask;
		csr_write_num(mhpmevent_csr, mhpmevent_curr);
	}

	return 0;
}

static void pmu_ctr_write_hw(uint32_t cidx, uint64_t ival)
{
#if __riscv_xlen == 32
	csr_write_num(CSR_MCYCLE + cidx, 0);
	csr_write_num(CSR_MCYCLE + cidx, ival & 0xFFFFFFFF);
	csr_write_num(CSR_MCYCLEH + cidx, ival >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MCYCLE + cidx, ival);
#endif
}

static int pmu_ctr_start_hw(uint32_t cidx, uint64_t ival, bool ival_update)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	unsigned long mctr_inhbt;

	/* Make sure the counter index lies within the range and is not the TM bit */
	if (cidx >= num_hw_ctrs || cidx == 1)
		return SBI_EINVAL;

	if (sbi_hart_priv_version(scratch) < SBI_HART_PRIV_VER_1_11)
		goto skip_inhibit_update;

	/*
	 * Some hardware may not support mcountinhibit, but perf stat can
	 * still work if supervisor mode programs the initial value.
	 */
	mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);
	if (!__test_bit(cidx, &mctr_inhbt))
		return SBI_EALREADY_STARTED;

	__clear_bit(cidx, &mctr_inhbt);

	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		pmu_ctr_enable_irq_hw(cidx);
	if (pmu_dev && pmu_dev->hw_counter_enable_irq)
		pmu_dev->hw_counter_enable_irq(cidx);
	csr_write(CSR_MCOUNTINHIBIT, mctr_inhbt);

skip_inhibit_update:
	if (ival_update)
		pmu_ctr_write_hw(cidx, ival);

	return 0;
}

int sbi_pmu_irq_bit(void)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		return MIP_LCOFIP;
	if (pmu_dev && pmu_dev->hw_counter_irq_bit)
		return pmu_dev->hw_counter_irq_bit();

	return 0;
}

static int pmu_ctr_start_fw(uint32_t cidx, uint32_t event_code,
			    uint64_t event_data, uint64_t ival,
			    bool ival_update)
{
	u32 hartid = current_hartid();

	if ((event_code >= SBI_PMU_FW_MAX &&
	     event_code <= SBI_PMU_FW_RESERVED_MAX) ||
	     event_code > SBI_PMU_FW_PLATFORM)
		return SBI_EINVAL;

	if (SBI_PMU_FW_PLATFORM == event_code) {
		if (!pmu_dev ||
		    !pmu_dev->fw_counter_write_value ||
		    !pmu_dev->fw_counter_start) {
			return SBI_EINVAL;
		}
		if (ival_update)
			pmu_dev->fw_counter_write_value(hartid,
							cidx - num_hw_ctrs,
							ival);
		return pmu_dev->fw_counter_start(hartid, cidx - num_hw_ctrs,
						 event_data);
	} else {
		if (ival_update)
			fw_counters_data[hartid][cidx - num_hw_ctrs] = ival;
	}

	fw_counters_started[hartid] |= BIT(cidx - num_hw_ctrs);

	return 0;
}
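
/*
 * Start a window of counters: 'cmask' is a bitmap relative to the base
 * counter index 'cbase', so e.g. cbase = 3 and cmask = 0x3 starts logical
 * counters 3 and 4. Counters whose slot has no valid event configured are
 * silently skipped so that the remaining counters can still be started.
 */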
int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
		      unsigned long flags, uint64_t ival)
{
	u32 hartid = current_hartid();
	int event_idx_type;
	uint32_t event_code;
	int ret = SBI_EINVAL;
	bool bUpdate = false;
	int i, cidx;
	uint64_t edata;

	if ((cbase + sbi_fls(cmask)) >= total_ctrs)
		return ret;

	if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
		bUpdate = true;

	for_each_set_bit(i, &cmask, total_ctrs) {
		cidx = i + cbase;
		event_idx_type = pmu_ctr_validate(cidx, &event_code);
		if (event_idx_type < 0)
			/* Continue the start operation for other counters */
			continue;
		else if (event_idx_type == SBI_PMU_EVENT_TYPE_FW) {
			edata = (event_code == SBI_PMU_FW_PLATFORM) ?
				 fw_counters_data[hartid][cidx - num_hw_ctrs]
				 : 0x0;
			ret = pmu_ctr_start_fw(cidx, event_code, edata, ival,
					       bUpdate);
		}
		else
			ret = pmu_ctr_start_hw(cidx, ival, bUpdate);
	}

	return ret;
}

static int pmu_ctr_stop_hw(uint32_t cidx)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	unsigned long mctr_inhbt;

	if (sbi_hart_priv_version(scratch) < SBI_HART_PRIV_VER_1_11)
		return 0;

	mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);

	/* Make sure the counter index lies within the range and is not the TM bit */
	if (cidx >= num_hw_ctrs || cidx == 1)
		return SBI_EINVAL;

	if (!__test_bit(cidx, &mctr_inhbt)) {
		__set_bit(cidx, &mctr_inhbt);
		csr_write(CSR_MCOUNTINHIBIT, mctr_inhbt);
		return 0;
	} else
		return SBI_EALREADY_STOPPED;
}

static int pmu_ctr_stop_fw(uint32_t cidx, uint32_t event_code)
{
	u32 hartid = current_hartid();
	int ret;

	if ((event_code >= SBI_PMU_FW_MAX &&
	     event_code <= SBI_PMU_FW_RESERVED_MAX) ||
	     event_code > SBI_PMU_FW_PLATFORM)
		return SBI_EINVAL;

	if (SBI_PMU_FW_PLATFORM == event_code &&
	    pmu_dev && pmu_dev->fw_counter_stop) {
		ret = pmu_dev->fw_counter_stop(hartid, cidx - num_hw_ctrs);
		if (ret)
			return ret;
	}

	fw_counters_started[current_hartid()] &= ~BIT(cidx - num_hw_ctrs);

	return 0;
}

static int pmu_reset_hw_mhpmevent(int ctr_idx)
{
	if (ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;
#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, 0);
	if (sbi_hart_has_extension(sbi_scratch_thishart_ptr(),
				   SBI_HART_EXT_SSCOFPMF))
		csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3, 0);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, 0);
#endif

	return 0;
}

int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
		     unsigned long flag)
{
	u32 hartid = current_hartid();
	int ret = SBI_EINVAL;
	int event_idx_type;
	uint32_t event_code;
	int i, cidx;

	if ((cbase + sbi_fls(cmask)) >= total_ctrs)
		return SBI_EINVAL;

	for_each_set_bit(i, &cmask, total_ctrs) {
		cidx = i + cbase;
		event_idx_type = pmu_ctr_validate(cidx, &event_code);
		if (event_idx_type < 0)
			/* Continue the stop operation for other counters */
			continue;
		else if (event_idx_type == SBI_PMU_EVENT_TYPE_FW)
			ret = pmu_ctr_stop_fw(cidx, event_code);
		else
			ret = pmu_ctr_stop_hw(cidx);

		if (flag & SBI_PMU_STOP_FLAG_RESET) {
			active_events[hartid][cidx] = SBI_PMU_EVENT_IDX_INVALID;
			pmu_reset_hw_mhpmevent(cidx);
		}
	}

	return ret;
}

static void pmu_update_inhibit_flags(unsigned long flags, uint64_t *mhpmevent_val)
{
	if (flags & SBI_PMU_CFG_FLAG_SET_VUINH)
		*mhpmevent_val |= MHPMEVENT_VUINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_VSINH)
		*mhpmevent_val |= MHPMEVENT_VSINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_UINH)
		*mhpmevent_val |= MHPMEVENT_UINH;
	if (flags & SBI_PMU_CFG_FLAG_SET_SINH)
		*mhpmevent_val |= MHPMEVENT_SINH;
}
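
/*
 * With the Sscofpmf extension, the upper bits of mhpmevent carry the
 * filtering and overflow controls (OF, MINH, SINH, UINH, VSINH, VUINH in
 * bits 63..58 on RV64), while the low bits select the raw event. The helper
 * below masks that control field out of the platform-provided value and then
 * re-applies the policy: overflow interrupt disabled (OF set), M-mode
 * counting inhibited, plus whatever inhibit flags the supervisor requested.
 */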
static int pmu_update_hw_mhpmevent(struct sbi_pmu_hw_event *hw_evt, int ctr_idx,
				   unsigned long flags, unsigned long eindex,
				   uint64_t data)
{
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	uint64_t mhpmevent_val;

	/* Get the final mhpmevent value to be written from platform */
	mhpmevent_val = sbi_platform_pmu_xlate_to_mhpmevent(plat, eindex, data);

	if (!mhpmevent_val || ctr_idx < 3 || ctr_idx >= SBI_PMU_HW_CTR_MAX)
		return SBI_EFAIL;

	/**
	 * Always set the OF bit (keeping interrupts disabled) and inhibit
	 * counting of events in M-mode. The OF bit is cleared again during
	 * the start call so that interrupts can be delivered.
	 */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		mhpmevent_val = (mhpmevent_val & ~MHPMEVENT_SSCOF_MASK) |
				 MHPMEVENT_MINH | MHPMEVENT_OF;

	if (pmu_dev && pmu_dev->hw_counter_disable_irq)
		pmu_dev->hw_counter_disable_irq(ctr_idx);

	/* Update the inhibit flags based on inhibit flags received from supervisor */
	pmu_update_inhibit_flags(flags, &mhpmevent_val);

#if __riscv_xlen == 32
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val & 0xFFFFFFFF);
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		csr_write_num(CSR_MHPMEVENT3H + ctr_idx - 3,
			      mhpmevent_val >> BITS_PER_LONG);
#else
	csr_write_num(CSR_MHPMEVENT3 + ctr_idx - 3, mhpmevent_val);
#endif

	return 0;
}

static int pmu_ctr_find_fixed_fw(unsigned long evt_idx_code)
{
	/* Non-programmable counters are always enabled. No need to do a lookup */
	if (evt_idx_code == SBI_PMU_HW_CPU_CYCLES)
		return 0;
	else if (evt_idx_code == SBI_PMU_HW_INSTRUCTIONS)
		return 2;
	else
		return SBI_EINVAL;
}
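
/*
 * Pick a hardware counter for 'event_idx': walk the registered
 * event-to-counter mappings that cover the event (matching the select value
 * for raw events), intersect the allowed counters with the window the
 * supervisor passed in, and choose a counter that is currently unused (no
 * active event and, when mcountinhibit exists, still inhibited).
 * Cycle/instret fall back to their fixed counters when no programmable
 * counter is available.
 */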
static int pmu_ctr_find_hw(unsigned long cbase, unsigned long cmask, unsigned long flags,
			   unsigned long event_idx, uint64_t data)
{
	unsigned long ctr_mask;
	int i, ret = 0, fixed_ctr, ctr_idx = SBI_ENOTSUPP;
	struct sbi_pmu_hw_event *temp;
	unsigned long mctr_inhbt = 0;
	u32 hartid = current_hartid();
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	if (cbase >= num_hw_ctrs)
		return SBI_EINVAL;

	/**
	 * If Sscofpmf is present, try to find a programmable counter for
	 * cycle/instret as well.
	 */
	fixed_ctr = pmu_ctr_find_fixed_fw(event_idx);
	if (fixed_ctr >= 0 &&
	    !sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
		return fixed_ctr;

	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
		mctr_inhbt = csr_read(CSR_MCOUNTINHIBIT);
	for (i = 0; i < num_hw_events; i++) {
		temp = &hw_event_map[i];
		if ((temp->start_idx > event_idx && event_idx < temp->end_idx) ||
		    (temp->start_idx < event_idx && event_idx > temp->end_idx))
			continue;

		/* For raw events, event data is used as the select value */
		if (event_idx == SBI_PMU_EVENT_RAW_IDX) {
			uint64_t select_mask = temp->select_mask;

			/* The non-event map bits of data should match the selector */
			if (temp->select != (data & select_mask))
				continue;
		}

		/* Fixed counters should not be part of the search */
		ctr_mask = temp->counters & (cmask << cbase) &
			   (~SBI_PMU_FIXED_CTR_MASK);
		for_each_set_bit_from(cbase, &ctr_mask, SBI_PMU_HW_CTR_MAX) {
			/**
			 * Some platforms may not support mcountinhibit.
			 * Checking the active_events is enough for them.
			 */
			if (active_events[hartid][cbase] != SBI_PMU_EVENT_IDX_INVALID)
				continue;
			/* If mcountinhibit is supported, the bit must be enabled */
			if ((sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11) &&
			    !__test_bit(cbase, &mctr_inhbt))
				continue;
			/* We found a valid counter that is not started yet */
			ctr_idx = cbase;
		}
	}

	if (ctr_idx == SBI_ENOTSUPP) {
		/**
		 * We can't find any programmable counters for cycle/instret.
		 * Return the fixed counter as they are mandatory anyway.
		 */
		if (fixed_ctr >= 0)
			return fixed_ctr;
		else
			return SBI_EFAIL;
	}
	ret = pmu_update_hw_mhpmevent(temp, ctr_idx, flags, event_idx, data);

	if (!ret)
		ret = ctr_idx;

	return ret;
}

/**
 * Any firmware counter can map to any firmware event.
 * Thus, select the first available fw counter after sanity
 * check.
 */
static int pmu_ctr_find_fw(unsigned long cbase, unsigned long cmask,
			   uint32_t event_code, u32 hartid, uint64_t edata)
{
	int i, cidx;

	if ((event_code >= SBI_PMU_FW_MAX &&
	     event_code <= SBI_PMU_FW_RESERVED_MAX) ||
	     event_code > SBI_PMU_FW_PLATFORM)
		return SBI_EINVAL;

	for_each_set_bit(i, &cmask, BITS_PER_LONG) {
		cidx = i + cbase;
		if (cidx < num_hw_ctrs || total_ctrs <= cidx)
			continue;
		if (active_events[hartid][i] != SBI_PMU_EVENT_IDX_INVALID)
			continue;
		if (SBI_PMU_FW_PLATFORM == event_code &&
		    pmu_dev && pmu_dev->fw_counter_match_encoding) {
			if (!pmu_dev->fw_counter_match_encoding(hartid,
								cidx - num_hw_ctrs,
								edata))
				continue;
		}

		return i;
	}

	return SBI_ENOTSUPP;
}
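
/*
 * Entry point for the SBI counter configure-and-match call. The usual
 * supervisor flow is: cfg_match to bind an event to a free counter (the
 * return value is the logical counter index), then counter start/stop with
 * that index, and fw_read for firmware counters. SKIP_MATCH reuses a
 * previously matched counter, CLEAR_VALUE zeroes it, and AUTO_START starts
 * it without a separate start call.
 */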
int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
			  unsigned long flags, unsigned long event_idx,
			  uint64_t event_data)
{
	int ret, ctr_idx = SBI_ENOTSUPP;
	u32 event_code, hartid = current_hartid();
	int event_type;

	/* Do a basic sanity check of counter base & mask */
	if ((cidx_base + sbi_fls(cidx_mask)) >= total_ctrs)
		return SBI_EINVAL;

	event_type = pmu_event_validate(event_idx, event_data);
	if (event_type < 0)
		return SBI_EINVAL;
	event_code = get_cidx_code(event_idx);

	if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
		/*
		 * The caller wants to skip the match because it already knows
		 * the counter idx for the given event. Verify that the
		 * counter idx is still valid.
		 */
		if (active_events[hartid][cidx_base] == SBI_PMU_EVENT_IDX_INVALID)
			return SBI_EINVAL;
		ctr_idx = cidx_base;
		goto skip_match;
	}

	if (event_type == SBI_PMU_EVENT_TYPE_FW) {
		/* Any firmware counter can be used to track any firmware event */
		ctr_idx = pmu_ctr_find_fw(cidx_base, cidx_mask, event_code,
					  hartid, event_data);
		if (event_code == SBI_PMU_FW_PLATFORM)
			fw_counters_data[hartid][ctr_idx - num_hw_ctrs] =
				event_data;
	} else {
		ctr_idx = pmu_ctr_find_hw(cidx_base, cidx_mask, flags, event_idx,
					  event_data);
	}

	if (ctr_idx < 0)
		return SBI_ENOTSUPP;

	active_events[hartid][ctr_idx] = event_idx;
skip_match:
	if (event_type == SBI_PMU_EVENT_TYPE_HW) {
		if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
			pmu_ctr_write_hw(ctr_idx, 0);
		if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
			pmu_ctr_start_hw(ctr_idx, 0, false);
	} else if (event_type == SBI_PMU_EVENT_TYPE_FW) {
		if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
			fw_counters_data[hartid][ctr_idx - num_hw_ctrs] = 0;
		if (flags & SBI_PMU_CFG_FLAG_AUTO_START) {
			if (SBI_PMU_FW_PLATFORM == event_code &&
			    pmu_dev && pmu_dev->fw_counter_start) {
				ret = pmu_dev->fw_counter_start(hartid,
					ctr_idx - num_hw_ctrs, event_data);
				if (ret)
					return ret;
			}
			fw_counters_started[hartid] |= BIT(ctr_idx - num_hw_ctrs);
		}
	}

	return ctr_idx;
}
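
/*
 * Bump a firmware event counter from inside OpenSBI itself. Other SBI
 * handlers call this on their fast paths (for example when sending an IPI
 * or emulating a misaligned access), so it only does work when at least one
 * firmware counter has actually been started on this hart.
 */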
int sbi_pmu_ctr_incr_fw(enum sbi_pmu_fw_event_code_id fw_id)
{
	u32 cidx, hartid = current_hartid();
	uint64_t *fcounter = NULL;

	if (likely(!fw_counters_started[hartid]))
		return 0;

	if (unlikely(fw_id >= SBI_PMU_FW_MAX))
		return SBI_EINVAL;

	for (cidx = num_hw_ctrs; cidx < total_ctrs; cidx++) {
		if (get_cidx_code(active_events[hartid][cidx]) == fw_id &&
		    (fw_counters_started[hartid] & BIT(cidx - num_hw_ctrs))) {
			fcounter = &fw_counters_data[hartid][cidx - num_hw_ctrs];
			break;
		}
	}

	if (fcounter)
		(*fcounter)++;

	return 0;
}

unsigned long sbi_pmu_num_ctr(void)
{
	return (num_hw_ctrs + SBI_PMU_FW_CTR_MAX);
}

int sbi_pmu_ctr_get_info(uint32_t cidx, unsigned long *ctr_info)
{
	int width;
	union sbi_pmu_ctr_info cinfo = {0};
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	/* Sanity check. Counter1 is not mapped at all */
	if (cidx >= total_ctrs || cidx == 1)
		return SBI_EINVAL;

	/* We have 31 HW counters with 31 being the last index (MHPMCOUNTER31) */
	if (cidx < num_hw_ctrs) {
		cinfo.type = SBI_PMU_CTR_TYPE_HW;
		cinfo.csr = CSR_CYCLE + cidx;
		/* mcycle & minstret are always 64 bit */
		if (cidx == 0 || cidx == 2)
			cinfo.width = 63;
		else
			cinfo.width = sbi_hart_mhpm_bits(scratch) - 1;
	} else {
		/* It's a firmware counter */
		cinfo.type = SBI_PMU_CTR_TYPE_FW;
		/* Firmware counters are always 64 bits wide */
		cinfo.width = 63;
		if (pmu_dev && pmu_dev->fw_counter_width) {
			width = pmu_dev->fw_counter_width();
			if (width)
				cinfo.width = width - 1;
		}
	}

	*ctr_info = cinfo.value;

	return 0;
}

static void pmu_reset_event_map(u32 hartid)
{
	int j;

	/* Initialize the counter to event mapping table */
	for (j = 3; j < total_ctrs; j++)
		active_events[hartid][j] = SBI_PMU_EVENT_IDX_INVALID;
	for (j = 0; j < SBI_PMU_FW_CTR_MAX; j++)
		fw_counters_data[hartid][j] = 0;
	fw_counters_started[hartid] = 0;
}

const struct sbi_pmu_device *sbi_pmu_get_device(void)
{
	return pmu_dev;
}

void sbi_pmu_set_device(const struct sbi_pmu_device *dev)
{
	if (!dev || pmu_dev)
		return;

	pmu_dev = dev;
}
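
/*
 * On the hart's exit path: inhibit all programmable counters while leaving
 * the fixed ones free-running (0xFFFFFFF8 sets mcountinhibit bits 3..31;
 * bits 0..2 cover cycle/time/instret, with the time bit hard-wired to zero),
 * grant lower privilege modes access to all counters via mcounteren, and
 * invalidate this hart's counter-to-event mapping.
 */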
void sbi_pmu_exit(struct sbi_scratch *scratch)
{
	u32 hartid = current_hartid();

	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
		csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8);

	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
		csr_write(CSR_MCOUNTEREN, -1);

	pmu_reset_event_map(hartid);
}

int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
{
	const struct sbi_platform *plat;
	u32 hartid = current_hartid();

	if (cold_boot) {
		plat = sbi_platform_ptr(scratch);
		/* Initialize hw pmu events */
		sbi_platform_pmu_init(plat);

		/* mcycle & minstret are always available */
		num_hw_ctrs = sbi_hart_mhpm_count(scratch) + 3;
		total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
	}

	pmu_reset_event_map(hartid);

	/* The first three counters are fixed by the priv spec and we enable them by default */
	active_events[hartid][0] = SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_OFFSET |
				   SBI_PMU_HW_CPU_CYCLES;
	active_events[hartid][1] = SBI_PMU_EVENT_IDX_INVALID;
	active_events[hartid][2] = SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_OFFSET |
				   SBI_PMU_HW_INSTRUCTIONS;

	return 0;
}