thead_c900_pmu_v1.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <asm/sbi.h>

#define RISCV_MAX_COUNTERS 32
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)

#define RISCV_PMU_CYCLE 0
#define RISCV_PMU_TIME 1
#define RISCV_PMU_INSTRET 2
#define RISCV_PMU_L1ICAC 3   /* ICache Access */
#define RISCV_PMU_L1ICMC 4   /* ICache Miss */
#define RISCV_PMU_IUTLBMC 5  /* I-UTLB Miss */
#define RISCV_PMU_DUTLBMC 6  /* D-UTLB Miss */
#define RISCV_PMU_JTLBMC 7   /* JTLB Miss Counter */
#define RISCV_PMU_CBMC 8     /* Cond-br-mispredict */
#define RISCV_PMU_CBIC 9     /* Cond-br-instruction */
#define RISCV_PMU_IBMC 10    /* Indirect Branch Mispredict */
#define RISCV_PMU_IBIC 11    /* Indirect Branch Instruction */
#define RISCV_PMU_LSUSFC 12  /* LSU Spec Fail */
#define RISCV_PMU_STC 13     /* Store Instruction */
#define RISCV_PMU_L1DCRAC 14 /* L1 DCache Read Access */
#define RISCV_PMU_L1DCRMC 15 /* L1 DCache Read Miss */
#define RISCV_PMU_L1DCWAC 16 /* L1 DCache Write Access */
#define RISCV_PMU_L1DCWMC 17 /* L1 DCache Write Miss */
#define RISCV_PMU_L2CRAC 18  /* L2 Cache Read Access */
#define RISCV_PMU_L2CRMC 19  /* L2 Cache Read Miss */
#define RISCV_PMU_L2CWAC 20  /* L2 Cache Write Access */
#define RISCV_PMU_L2CWMC 21  /* L2 Cache Write Miss */
#define RISCV_PMU_RFLFC 22   /* RF Launch Fail */
#define RISCV_PMU_RFRLFC 23  /* RF Reg Launch Fail */
#define RISCV_PMU_RFIC 24    /* RF Instruction */
#define RISCV_PMU_LSUC4SC 25 /* LSU Cross 4K Stall */
#define RISCV_PMU_LSUOSC 26  /* LSU Other Stall */
#define RISCV_PMU_LSUSQDC 27 /* LSU SQ Discard */
#define RISCV_PMU_LSUSQDDC 28 /* LSU SQ Data Discard */

#define SCOUNTERINTEN 0x5c4
#define SCOUNTEROF 0x5c5
#define SCOUNTERBASE 0x5e0

#define WRITE_COUNTER(idx, value) \
        csr_write(SCOUNTERBASE + idx, value)
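/*
 * SCOUNTERINTEN, SCOUNTEROF and SCOUNTERBASE are T-HEAD vendor CSRs in
 * the custom S-mode CSR range.  Each event index maps 1:1 onto a counter
 * CSR at SCOUNTERBASE + idx, so for example (illustrative):
 *
 *      WRITE_COUNTER(RISCV_PMU_L1ICMC, 0);  // writes CSR 0x5e0 + 4 = 0x5e4
 *
 * clears the I-cache miss counter.
 */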
/* The events for a given PMU register set. */
struct pmu_hw_events {
        /*
         * The events that are active on the PMU for the given index.
         */
        struct perf_event *events[RISCV_MAX_COUNTERS];

        /*
         * A 1 bit for an index indicates that the counter is being used for
         * an event. A 0 means that the counter can be used.
         */
        unsigned long used_mask[BITS_TO_LONGS(RISCV_MAX_COUNTERS)];
};

static struct riscv_pmu_t {
        struct pmu pmu;
        struct pmu_hw_events __percpu *hw_events;
        struct platform_device *plat_device;
        u64 max_period;
} riscv_pmu;

static int riscv_pmu_irq;

/*
 * Hardware & cache maps and their methods
 */
static const int riscv_hw_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES]              = RISCV_PMU_CYCLE,
        [PERF_COUNT_HW_INSTRUCTIONS]            = RISCV_PMU_INSTRET,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = RISCV_PMU_L1ICAC,
        [PERF_COUNT_HW_CACHE_MISSES]            = RISCV_PMU_L1ICMC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = RISCV_PMU_CBIC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = RISCV_PMU_CBMC,
        [PERF_COUNT_HW_BUS_CYCLES]              = RISCV_PMU_IBMC,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = RISCV_PMU_IBIC,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = RISCV_PMU_LSUSFC,
        [PERF_COUNT_HW_REF_CPU_CYCLES]          = RISCV_PMU_STC,
};
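/*
 * A generic perf hardware event translates directly into one of the
 * counter indices above; illustrative userspace selection:
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_HARDWARE,
 *              .config = PERF_COUNT_HW_CACHE_MISSES, // -> RISCV_PMU_L1ICMC
 *      };
 *
 * Note that several generic events (BUS_CYCLES, STALLED_CYCLES_*,
 * REF_CPU_CYCLES) are repurposed for C900-specific counters here.
 */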
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_PMU_L1DCRAC,
                        [C(RESULT_MISS)]   = RISCV_PMU_L1DCRMC,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_PMU_L1DCWAC,
                        [C(RESULT_MISS)]   = RISCV_PMU_L1DCWMC,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_PMU_L2CRAC,
                        [C(RESULT_MISS)]   = RISCV_PMU_L2CRMC,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_PMU_L2CWAC,
                        [C(RESULT_MISS)]   = RISCV_PMU_L2CWMC,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
                        [C(RESULT_MISS)]   = RISCV_OP_UNSUPP,
                },
        },
};
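/*
 * Cache events use the standard perf config encoding, decoded by
 * riscv_pmu_cache_event() below: type in bits 0-7, op in bits 8-15,
 * result in bits 16-23.  Illustrative example:
 *
 *      .type   = PERF_TYPE_HW_CACHE,
 *      .config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16),
 *
 * which selects RISCV_PMU_L1DCRMC.
 */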
/*
 * Low-level functions: reading/writing counters
 */
static inline u64 read_counter(int idx)
{
        u64 val = 0;

        switch (idx) {
        case RISCV_PMU_CYCLE:
                val = csr_read(cycle);
                break;
        case RISCV_PMU_INSTRET:
                val = csr_read(instret);
                break;
        case RISCV_PMU_L1ICAC:
                val = csr_read(hpmcounter3);
                break;
        case RISCV_PMU_L1ICMC:
                val = csr_read(hpmcounter4);
                break;
        case RISCV_PMU_IUTLBMC:
                val = csr_read(hpmcounter5);
                break;
        case RISCV_PMU_DUTLBMC:
                val = csr_read(hpmcounter6);
                break;
        case RISCV_PMU_JTLBMC:
                val = csr_read(hpmcounter7);
                break;
        case RISCV_PMU_CBMC:
                val = csr_read(hpmcounter8);
                break;
        case RISCV_PMU_CBIC:
                val = csr_read(hpmcounter9);
                break;
        case RISCV_PMU_IBMC:
                val = csr_read(hpmcounter10);
                break;
        case RISCV_PMU_IBIC:
                val = csr_read(hpmcounter11);
                break;
        case RISCV_PMU_LSUSFC:
                val = csr_read(hpmcounter12);
                break;
        case RISCV_PMU_STC:
                val = csr_read(hpmcounter13);
                break;
        case RISCV_PMU_L1DCRAC:
                val = csr_read(hpmcounter14);
                break;
        case RISCV_PMU_L1DCRMC:
                val = csr_read(hpmcounter15);
                break;
        case RISCV_PMU_L1DCWAC:
                val = csr_read(hpmcounter16);
                break;
        case RISCV_PMU_L1DCWMC:
                val = csr_read(hpmcounter17);
                break;
        case RISCV_PMU_L2CRAC:
                val = csr_read(hpmcounter18);
                break;
        case RISCV_PMU_L2CRMC:
                val = csr_read(hpmcounter19);
                break;
        case RISCV_PMU_L2CWAC:
                val = csr_read(hpmcounter20);
                break;
        case RISCV_PMU_L2CWMC:
                val = csr_read(hpmcounter21);
                break;
        case RISCV_PMU_RFLFC:
                val = csr_read(hpmcounter22);
                break;
        case RISCV_PMU_RFRLFC:
                val = csr_read(hpmcounter23);
                break;
        case RISCV_PMU_RFIC:
                val = csr_read(hpmcounter24);
                break;
        case RISCV_PMU_LSUC4SC:
                val = csr_read(hpmcounter25);
                break;
        case RISCV_PMU_LSUOSC:
                val = csr_read(hpmcounter26);
                break;
        case RISCV_PMU_LSUSQDC:
                val = csr_read(hpmcounter27);
                break;
        case RISCV_PMU_LSUSQDDC:
                val = csr_read(hpmcounter28);
                break;
        default:
                WARN_ON_ONCE(idx < 0 || idx >= RISCV_MAX_COUNTERS);
                return -EINVAL;
        }

        return val;
}
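/*
 * The counters are read through the standard 64-bit cycle/instret/
 * hpmcounter3..28 CSRs, so on RV64 a single csr_read() returns the full
 * value; RV32 would additionally need the hpmcounterNh upper halves,
 * which this driver does not read.
 */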
static inline void write_counter(int idx, u64 value)
{
        switch (idx) {
        case RISCV_PMU_CYCLE:
                WRITE_COUNTER(RISCV_PMU_CYCLE, value);
                break;
        case RISCV_PMU_INSTRET:
                WRITE_COUNTER(RISCV_PMU_INSTRET, value);
                break;
        case RISCV_PMU_L1ICAC:
                WRITE_COUNTER(RISCV_PMU_L1ICAC, value);
                break;
        case RISCV_PMU_L1ICMC:
                WRITE_COUNTER(RISCV_PMU_L1ICMC, value);
                break;
        case RISCV_PMU_IUTLBMC:
                WRITE_COUNTER(RISCV_PMU_IUTLBMC, value);
                break;
        case RISCV_PMU_DUTLBMC:
                WRITE_COUNTER(RISCV_PMU_DUTLBMC, value);
                break;
        case RISCV_PMU_JTLBMC:
                WRITE_COUNTER(RISCV_PMU_JTLBMC, value);
                break;
        case RISCV_PMU_CBMC:
                WRITE_COUNTER(RISCV_PMU_CBMC, value);
                break;
        case RISCV_PMU_CBIC:
                WRITE_COUNTER(RISCV_PMU_CBIC, value);
                break;
        case RISCV_PMU_IBMC:
                WRITE_COUNTER(RISCV_PMU_IBMC, value);
                break;
        case RISCV_PMU_IBIC:
                WRITE_COUNTER(RISCV_PMU_IBIC, value);
                break;
        case RISCV_PMU_LSUSFC:
                WRITE_COUNTER(RISCV_PMU_LSUSFC, value);
                break;
        case RISCV_PMU_STC:
                WRITE_COUNTER(RISCV_PMU_STC, value);
                break;
        case RISCV_PMU_L1DCRAC:
                WRITE_COUNTER(RISCV_PMU_L1DCRAC, value);
                break;
        case RISCV_PMU_L1DCRMC:
                WRITE_COUNTER(RISCV_PMU_L1DCRMC, value);
                break;
        case RISCV_PMU_L1DCWAC:
                WRITE_COUNTER(RISCV_PMU_L1DCWAC, value);
                break;
        case RISCV_PMU_L1DCWMC:
                WRITE_COUNTER(RISCV_PMU_L1DCWMC, value);
                break;
        case RISCV_PMU_L2CRAC:
                WRITE_COUNTER(RISCV_PMU_L2CRAC, value);
                break;
        case RISCV_PMU_L2CRMC:
                WRITE_COUNTER(RISCV_PMU_L2CRMC, value);
                break;
        case RISCV_PMU_L2CWAC:
                WRITE_COUNTER(RISCV_PMU_L2CWAC, value);
                break;
        case RISCV_PMU_L2CWMC:
                WRITE_COUNTER(RISCV_PMU_L2CWMC, value);
                break;
        case RISCV_PMU_RFLFC:
                WRITE_COUNTER(RISCV_PMU_RFLFC, value);
                break;
        case RISCV_PMU_RFRLFC:
                WRITE_COUNTER(RISCV_PMU_RFRLFC, value);
                break;
        case RISCV_PMU_RFIC:
                WRITE_COUNTER(RISCV_PMU_RFIC, value);
                break;
        case RISCV_PMU_LSUC4SC:
                WRITE_COUNTER(RISCV_PMU_LSUC4SC, value);
                break;
        case RISCV_PMU_LSUOSC:
                WRITE_COUNTER(RISCV_PMU_LSUOSC, value);
                break;
        case RISCV_PMU_LSUSQDC:
                WRITE_COUNTER(RISCV_PMU_LSUSQDC, value);
                break;
        case RISCV_PMU_LSUSQDDC:
                WRITE_COUNTER(RISCV_PMU_LSUSQDDC, value);
                break;
        default:
                WARN_ON_ONCE(idx < 0 || idx >= RISCV_MAX_COUNTERS);
        }
}
static int riscv_pmu_event_is_frequent(int idx)
{
        return idx >= RISCV_PMU_CYCLE &&
               idx <= RISCV_PMU_L1DCWMC;
}

static int riscv_pmu_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (period < 4096 && period != 0 &&
            riscv_pmu_event_is_frequent(hwc->idx)) {
                hwc->sample_period = period = 4096;
        }

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left < 0)
                left = riscv_pmu.max_period;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future "deltas":
         */
        local64_set(&hwc->prev_count, (u64)(-left));

        csr_write(SCOUNTEROF, csr_read(SCOUNTEROF) & ~BIT(hwc->idx));
        write_counter(hwc->idx, (u64)(-left));

        perf_event_update_userpage(event);

        return ret;
}
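/*
 * Programming the counter to -left means it overflows, and raises the
 * counter-overflow interrupt, after exactly `left` further events.
 * E.g. with a sample period of 4096 the counter is written with
 * (u64)-4096 = 0xfffffffffffff000 and wraps after 4096 increments;
 * clearing the SCOUNTEROF bit above re-arms overflow detection.
 */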
static void riscv_perf_event_update(struct perf_event *event,
                                    struct hw_perf_event *hwc)
{
        uint64_t prev_raw_count = local64_read(&hwc->prev_count);
        /*
         * Sign extend count value to 64bit, otherwise delta calculation
         * would be incorrect when overflow occurs.
         */
        uint64_t new_raw_count = read_counter(hwc->idx);
        int64_t delta = new_raw_count - prev_raw_count;

        /*
         * We aren't afraid of hwc->prev_count changing beneath our feet
         * because there's no way for us to re-enter this function anytime.
         */
        local64_set(&hwc->prev_count, new_raw_count);
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}
static void riscv_pmu_read(struct perf_event *event)
{
        riscv_perf_event_update(event, &event->hw);
}

static int riscv_pmu_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;

        cache_type   = (config >> 0) & 0xff;
        cache_op     = (config >> 8) & 0xff;
        cache_result = (config >> 16) & 0xff;

        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        return riscv_cache_event_map[cache_type][cache_op][cache_result];
}
static int riscv_pmu_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int ret;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                if (event->attr.config >= PERF_COUNT_HW_MAX)
                        return -ENOENT;
                ret = riscv_hw_event_map[event->attr.config];
                if (ret == RISCV_OP_UNSUPP)
                        return -ENOENT;
                hwc->idx = ret;
                break;
        case PERF_TYPE_HW_CACHE:
                ret = riscv_pmu_cache_event(event->attr.config);
                if (ret == RISCV_OP_UNSUPP)
                        return -ENOENT;
                hwc->idx = ret;
                break;
        case PERF_TYPE_RAW:
                /* attr.config is u64, so only the upper bound needs checking */
                if (event->attr.config >= RISCV_MAX_COUNTERS)
                        return -ENOENT;
                hwc->idx = event->attr.config;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}
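/*
 * Raw events map directly onto the counter indices, so, illustratively,
 * `perf stat -e r7` counts JTLB misses via RISCV_PMU_JTLBMC.
 */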
static void riscv_pmu_enable(struct pmu *pmu)
{
}

/* stops all counters */
static void riscv_pmu_disable(struct pmu *pmu)
{
}
static void riscv_pmu_start(struct perf_event *event, int flags)
{
        unsigned long flg;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        riscv_pmu_event_set_period(event);

        local_irq_save(flg);
        /* unmask the overflow interrupt for this counter */
        csr_write(SCOUNTERINTEN, BIT(idx) | csr_read(SCOUNTERINTEN));
        local_irq_restore(flg);
}
static void riscv_pmu_stop_event(struct perf_event *event)
{
        unsigned long flg;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        local_irq_save(flg);
        /* mask the overflow interrupt for this counter */
        csr_write(SCOUNTERINTEN, ~BIT(idx) & csr_read(SCOUNTERINTEN));
        local_irq_restore(flg);
}
static void riscv_pmu_stop(struct perf_event *event, int flags)
{
        if (!(event->hw.state & PERF_HES_STOPPED)) {
                riscv_pmu_stop_event(event);
                event->hw.state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) &&
            !(event->hw.state & PERF_HES_UPTODATE)) {
                riscv_perf_event_update(event, &event->hw);
                event->hw.state |= PERF_HES_UPTODATE;
        }
}
static void riscv_pmu_del(struct perf_event *event, int flags)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
        struct hw_perf_event *hwc = &event->hw;

        riscv_pmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[hwc->idx] = NULL;
        perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int riscv_pmu_add(struct perf_event *event, int flags)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(riscv_pmu.hw_events);
        struct hw_perf_event *hwc = &event->hw;

        hw_events->events[hwc->idx] = event;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                riscv_pmu_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);

        return 0;
}
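/*
 * The perf core calls ->add() with the event stopped (PERF_HES_STOPPED |
 * PERF_HES_UPTODATE); if PERF_EF_START is set, ->start() clears the
 * state and arms the counter.  ->del() symmetrically stops the event and
 * folds the final count back in via PERF_EF_UPDATE.
 */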
static irqreturn_t riscv_pmu_handle_irq(int irq, void *dev_id)
{
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(riscv_pmu.hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Did an overflow occur?
         */
        if (!csr_read(SCOUNTEROF))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < RISCV_MAX_COUNTERS; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!(csr_read(SCOUNTEROF) & BIT(idx)))
                        continue;

                hwc = &event->hw;
                riscv_perf_event_update(event, hwc);
                perf_sample_data_init(&data, 0, hwc->last_period);
                riscv_pmu_event_set_period(event);

                if (perf_event_overflow(event, &data, regs))
                        riscv_pmu_stop_event(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}
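/*
 * All counters share this one overflow interrupt: SCOUNTEROF is a
 * per-counter overflow bitmap, so the handler walks the active events,
 * processes only those whose overflow bit is set, and re-arms each via
 * riscv_pmu_event_set_period().
 */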
static int riscv_pmu_request_irq(irq_handler_t handler)
{
        int err, irqs;
        struct platform_device *pmu_device = riscv_pmu.plat_device;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        riscv_pmu_irq = platform_get_irq(pmu_device, 0);
        if (riscv_pmu_irq < 0)
                return -ENODEV;

        /*
         * The dev_id of a percpu IRQ must be a __percpu pointer, and the
         * same pointer must later be passed to free_percpu_irq().
         */
        err = request_percpu_irq(riscv_pmu_irq, handler, "c9xx-pmu-v1",
                                 riscv_pmu.hw_events);
        if (err) {
                pr_err("unable to request IRQ%d for c9xx PMU counters\n",
                       riscv_pmu_irq);
                return err;
        }

        return 0;
}
static void riscv_pmu_free_irq(void)
{
        int irq;
        struct platform_device *pmu_device = riscv_pmu.plat_device;

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0)
                free_percpu_irq(irq, riscv_pmu.hw_events);
}
static int init_hw_perf_events(void)
{
        riscv_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
                                               GFP_KERNEL);
        if (!riscv_pmu.hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                return -ENOMEM;
        }

        riscv_pmu.pmu = (struct pmu) {
                .pmu_enable  = riscv_pmu_enable,
                .pmu_disable = riscv_pmu_disable,
                .event_init  = riscv_pmu_event_init,
                .add         = riscv_pmu_add,
                .del         = riscv_pmu_del,
                .start       = riscv_pmu_start,
                .stop        = riscv_pmu_stop,
                .read        = riscv_pmu_read,
        };

        return 0;
}
static int riscv_pmu_starting_cpu(unsigned int cpu)
{
        /*
         * 0x09000001 is presumably a T-HEAD vendor SBI extension ID; this
         * call asks the SBI firmware to grant S-mode access to the PMU
         * counters on this hart.
         */
        sbi_ecall(0x09000001, 0, 1, 0, 0, 0, 0, 0);
        enable_percpu_irq(riscv_pmu_irq, 0);
        return 0;
}

static int riscv_pmu_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(riscv_pmu_irq);
        return 0;
}
static int riscv_pmu_device_probe(struct platform_device *pdev,
                                  const struct of_device_id *of_table)
{
        int ret;

        ret = init_hw_perf_events();
        if (ret) {
                pr_notice("[perf] failed to probe PMU!\n");
                return ret;
        }
        riscv_pmu.max_period = ULONG_MAX;
        riscv_pmu.plat_device = pdev;

        /* Without an overflow interrupt the PMU can still count, but not sample. */
        ret = riscv_pmu_request_irq(riscv_pmu_handle_irq);
        if (ret) {
                riscv_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                pr_notice("[perf] failed to request PMU irq!\n");
        }

        ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE + 1, "perf riscv:online",
                                riscv_pmu_starting_cpu,
                                riscv_pmu_dying_cpu);
        if (ret) {
                riscv_pmu_free_irq();
                free_percpu(riscv_pmu.hw_events);
                return ret;
        }

        ret = perf_pmu_register(&riscv_pmu.pmu, "thead_xt_pmu", PERF_TYPE_RAW);
        if (ret) {
                riscv_pmu_free_irq();
                free_percpu(riscv_pmu.hw_events);
                return ret;
        }

        pr_notice("[perf] T-HEAD C900 PMU v1 probed\n");
        return 0;
}
static const struct of_device_id riscv_pmu_of_device_ids[] = {
        { .compatible = "riscv,thead_xt_pmu" },
        { .compatible = "riscv,c910_pmu" },
        {},
};
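/*
 * A matching devicetree node might look like this (illustrative sketch;
 * the interrupt specifier is an assumption, only the compatible strings
 * come from the table above):
 *
 *      pmu {
 *              compatible = "riscv,c910_pmu";
 *              interrupts-extended = <&cpu0_intc 17>;
 *      };
 */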
static int riscv_pmu_dev_probe(struct platform_device *pdev)
{
        return riscv_pmu_device_probe(pdev, riscv_pmu_of_device_ids);
}

static struct platform_driver riscv_pmu_driver = {
        .driver = {
                .name           = "thead_xt_pmu_v1",
                .of_match_table = riscv_pmu_of_device_ids,
        },
        .probe = riscv_pmu_dev_probe,
};

int __init riscv_pmu_v1_probe(void)
{
        return platform_driver_register(&riscv_pmu_driver);
}
device_initcall(riscv_pmu_v1_probe);
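/*
 * Once the driver is bound, the counters are usable through the normal
 * perf interface, e.g. (illustrative):
 *
 *      perf stat -e cycles,instructions,r7 -a sleep 1
 */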