thead_c900_pmu.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <asm/csr.h>	/* csr_read()/csr_write()/csr_set()/csr_clear() */
#include <asm/sbi.h>
#define MAX_COUNTERS	32

/*
 * Counter enable register
 */
#define SCOUNTEREN	0x106

/*
 * Counter inhibit register
 */
#define SCOUNTINHIBIT	0x5c8

/*
 * 11: PMDS - rw, Performance monitor disable S-mode counting
 * 10: PMDU - rw, Performance monitor disable U-mode counting
 */
#define SXSTATUS	0x5c0

/*
 * Overflow interrupt enable & status registers
 */
#define SCOUNTERINTEN	0x5c4
#define SCOUNTEROF	0x5c5

/*
 * 63: TS - rw, status of trigger
 * 13: PMDM - ro, Performance monitor disable machine mode counting
 * 11: PMDS - rw, Performance monitor disable supervisor mode counting
 * 10: PMDU - rw, Performance monitor disable user mode counting
 * 0-1: TME - Trigger mode, 2'b00 trigger disabled, 2'b01 trigger enabled
 */
#define SHPMCR		0x5c9

/*
 * Start/End trigger registers
 */
#define SHPMSP		0x5ca
#define SHPMEP		0x5cb

#define SHPMCOUNTER0	0x5e0
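
/*
 * Per-CPU bookkeeping: one perf_event slot per hardware counter plus a
 * bitmap of the counters currently in use. The single thead_pmu instance
 * below ties the struct pmu to the per-CPU state, the platform device and
 * the overflow interrupt.
 */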
struct pmu_hw_events {
	struct perf_event *events[MAX_COUNTERS];
	DECLARE_BITMAP(used_mask, MAX_COUNTERS);
};

static struct thead_pmu_t {
	struct pmu pmu;
	struct pmu_hw_events __percpu *hw_events;
	struct platform_device *plat_device;
	unsigned long max_period;
	int irq;
} thead_pmu;

/*
 * Hardware events
 */
#define EVENT_NONE				0
#define EVENT_L1_ICACHE_ACCESS			1
#define EVENT_L1_ICACHE_MISS			2
#define EVENT_ITLB_MISS				3
#define EVENT_DTLB_MISS				4
#define EVENT_JTLB_MISS				5
#define EVENT_BRANCH_MISS			6
#define EVENT_BRANCH				7
#define EVENT_INDIRECT_BRANCH_MISS		8
#define EVENT_INDIRECT_BRANCH			9
#define EVENT_LSU_SPEC_FAIL			10
#define EVENT_STORE_INSTRUCTION			11
#define EVENT_L1_DCACHE_LOAD_ACCESS		12
#define EVENT_L1_DCACHE_LOAD_MISS		13
#define EVENT_L1_DCACHE_STORE_ACCESS		14
#define EVENT_L1_DCACHE_STORE_MISS		15
#define EVENT_L2_LOAD_ACCESS			16
#define EVENT_L2_LOAD_MISS			17
#define EVENT_L2_STORE_ACCESS			18
#define EVENT_L2_STORE_MISS			19
#define EVENT_RF_LAUNCH_FAIL			20
#define EVENT_RF_REG_LAUNCH_FAIL		21
#define EVENT_RF_INSTRUCTION			22
#define EVENT_LSU_CROSS_4K_STALL		23
#define EVENT_LSU_OTHER_STALL			24
#define EVENT_LSU_SQ_DISCARD			25
#define EVENT_LSU_SQ_DATA_DISCARD		26
#define EVENT_IFU_BRANCH_TARGET_MISPRED		27
#define EVENT_IFU_BRANCH_TARGET_INSTRUCTION	28
#define EVENT_ALU_INSTRUCTION			29
#define EVENT_LDST_INSTRUCTION			30
#define EVENT_VECTOR_SIMD_INSTRUCTION		31
#define EVENT_CSR_INSTRUCTION			32
#define EVENT_SYNC_INSTRUCTION			33
#define EVENT_LDST_UNALIGNED_ACCESS		34
#define EVENT_INTERRUPT_NUMBER			35
#define EVENT_INTERRUPT_OFF_CYCLE		36
#define EVENT_ENVIRONMENT_CALL			37
#define EVENT_LONG_JUMP				38
#define EVENT_STALLED_CYCLES_FRONTEND		39
#define EVENT_STALLED_CYCLES_BACKEND		40
#define EVENT_SYNC_STALL			41
#define EVENT_FLOAT_POINT_INSTRUCTION		42
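
/*
 * Map of the generic perf hardware events onto the event selectors above.
 * Cycles and instructions are not taken from this table: event_init()
 * routes them to the fixed counters (idx 0 and idx 2) instead.
 */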
static const int hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= -EOPNOTSUPP,
	[PERF_COUNT_HW_INSTRUCTIONS]		= -EOPNOTSUPP,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= EVENT_L1_ICACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= EVENT_L1_ICACHE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= EVENT_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= EVENT_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]		= -EOPNOTSUPP,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= EVENT_STALLED_CYCLES_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= EVENT_STALLED_CYCLES_BACKEND,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= -EOPNOTSUPP,
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const int thead_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_DCACHE_LOAD_ACCESS,
			[C(RESULT_MISS)]	= EVENT_L1_DCACHE_LOAD_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_DCACHE_STORE_ACCESS,
			[C(RESULT_MISS)]	= EVENT_L1_DCACHE_STORE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= EVENT_L1_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= EVENT_L2_LOAD_ACCESS,
			[C(RESULT_MISS)]	= EVENT_L2_LOAD_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= EVENT_L2_STORE_ACCESS,
			[C(RESULT_MISS)]	= EVENT_L2_STORE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_DCACHE_LOAD_ACCESS,
			[C(RESULT_MISS)]	= EVENT_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_DCACHE_STORE_ACCESS,
			[C(RESULT_MISS)]	= EVENT_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= EVENT_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= EVENT_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -EOPNOTSUPP,
			[C(RESULT_MISS)]	= -EOPNOTSUPP,
		},
	},
};
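
/*
 * csr_read()/csr_write() encode the CSR number into the instruction, so the
 * CSR address must be a compile-time constant. Per-index counter access is
 * therefore expanded via this macro, once per case in the switch below.
 * Index 1 (presumably the time counter) is not accessible this way and is
 * rejected with -EINVAL.
 */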
#define RW_COUNTER(idx, value, is_write)		\
	if (is_write) {					\
		csr_write(SHPMCOUNTER0 + idx, value);	\
		return 0;				\
	} else						\
		return csr_read(SHPMCOUNTER0 + idx);

static inline u64 rw_counter(int idx, u64 value, bool is_write)
{
	switch (idx) {
	case 0:
		RW_COUNTER(0, value, is_write);
	case 1:
		return -EINVAL;
	case 2:
		RW_COUNTER(2, value, is_write);
	case 3:
		RW_COUNTER(3, value, is_write);
	case 4:
		RW_COUNTER(4, value, is_write);
	case 5:
		RW_COUNTER(5, value, is_write);
	case 6:
		RW_COUNTER(6, value, is_write);
	case 7:
		RW_COUNTER(7, value, is_write);
	case 8:
		RW_COUNTER(8, value, is_write);
	case 9:
		RW_COUNTER(9, value, is_write);
	case 10:
		RW_COUNTER(10, value, is_write);
	case 11:
		RW_COUNTER(11, value, is_write);
	case 12:
		RW_COUNTER(12, value, is_write);
	case 13:
		RW_COUNTER(13, value, is_write);
	case 14:
		RW_COUNTER(14, value, is_write);
	case 15:
		RW_COUNTER(15, value, is_write);
	case 16:
		RW_COUNTER(16, value, is_write);
	case 17:
		RW_COUNTER(17, value, is_write);
	case 18:
		RW_COUNTER(18, value, is_write);
	case 19:
		RW_COUNTER(19, value, is_write);
	case 20:
		RW_COUNTER(20, value, is_write);
	case 21:
		RW_COUNTER(21, value, is_write);
	case 22:
		RW_COUNTER(22, value, is_write);
	case 23:
		RW_COUNTER(23, value, is_write);
	case 24:
		RW_COUNTER(24, value, is_write);
	case 25:
		RW_COUNTER(25, value, is_write);
	case 26:
		RW_COUNTER(26, value, is_write);
	case 27:
		RW_COUNTER(27, value, is_write);
	case 28:
		RW_COUNTER(28, value, is_write);
	case 29:
		RW_COUNTER(29, value, is_write);
	case 30:
		RW_COUNTER(30, value, is_write);
	case 31:
		RW_COUNTER(31, value, is_write);
	}

	return -EINVAL;
}
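
/*
 * Counters count upwards, so the sampling period is armed by preloading
 * (u64)(-left): the counter then overflows (and raises the interrupt) after
 * 'left' more events. Periods below 4096 are clamped for the fixed counters
 * and the high-rate cache-access events, presumably to bound the interrupt
 * rate.
 */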
static int thead_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (period < 4096 && period != 0 &&
	    (hwc->idx == 0 || hwc->idx == 2 ||
	     hwc->config_base == EVENT_L1_ICACHE_ACCESS ||
	     hwc->config_base == EVENT_L1_DCACHE_LOAD_ACCESS ||
	     hwc->config_base == EVENT_L1_DCACHE_STORE_ACCESS ||
	     hwc->config_base == EVENT_L2_LOAD_ACCESS ||
	     hwc->config_base == EVENT_L2_STORE_ACCESS))
		hwc->sample_period = period = 4096;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left < 0)
		left = thead_pmu.max_period;

	local64_set(&hwc->prev_count, (u64)(-left));

	csr_clear(SCOUNTEROF, BIT(hwc->idx));
	rw_counter(hwc->idx, (u64)(-left), true);

	perf_event_update_userpage(event);

	return ret;
}

static void thead_perf_event_update(struct perf_event *event,
				    struct hw_perf_event *hwc)
{
	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
	/*
	 * Sign extend count value to 64bit, otherwise delta calculation
	 * would be incorrect when overflow occurs.
	 */
	uint64_t new_raw_count = rw_counter(hwc->idx, 0, false);
	int64_t delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for us to re-enter this function anytime.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void thead_pmu_read(struct perf_event *event)
{
	thead_perf_event_update(event, &event->hw);
}

static int thead_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	return thead_cache_event_map[cache_type][cache_op][cache_result];
}
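
/*
 * Counter assignment convention: generic cycle events use fixed counter 0
 * and instructions use fixed counter 2, with config_base left as EVENT_NONE;
 * every other event keeps its selector in config_base and gets a
 * programmable counter assigned later in thead_pmu_add().
 */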
static int thead_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int event_id;

	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		event_id = hw_event_map[event->attr.config];
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES ||
		    event->attr.config == PERF_COUNT_HW_BUS_CYCLES ||
		    event->attr.config == PERF_COUNT_HW_REF_CPU_CYCLES) {
			hwc->idx = 0;
			hwc->config_base = EVENT_NONE;
			break;
		}
		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			hwc->idx = 2;
			hwc->config_base = EVENT_NONE;
			break;
		}
		/* Remaining hardware events map through hw_event_map. */
		if (event_id < 0)
			return -ENOENT;
		hwc->config_base = event_id;
		break;
	case PERF_TYPE_HW_CACHE:
		event_id = thead_pmu_cache_event(event->attr.config);
		if (event_id == -EOPNOTSUPP)
			return -ENOENT;
		hwc->config_base = event_id;
		break;
	case PERF_TYPE_RAW:
		event_id = event->attr.config;
		if (event_id < 0)
			return -ENOENT;
		hwc->config_base = event_id;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void thead_pmu_enable(struct pmu *pmu)
{
}

static void thead_pmu_disable(struct pmu *pmu)
{
}

static void thead_pmu_start(struct perf_event *event, int flags)
{
	unsigned long flg;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	thead_pmu_event_set_period(event);

	local_irq_save(flg);
	csr_set(SCOUNTERINTEN, BIT(idx));
	local_irq_restore(flg);
}

static void thead_pmu_stop_event(struct perf_event *event)
{
	unsigned long flg;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	local_irq_save(flg);
	csr_clear(SCOUNTERINTEN, BIT(idx));
	local_irq_restore(flg);
}

static void thead_pmu_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		thead_pmu_stop_event(event);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		thead_perf_event_update(event, &event->hw);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
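
/*
 * Counters 0-2 are treated as fixed (cycle/time/instret per the usual RISC-V
 * counter layout), so the allocator below always marks them used before
 * searching the bitmap. The sbi_ecall() appears to be a T-HEAD
 * vendor-specific SBI extension (EID 0x09000001) that binds the event
 * selector in config_base to the chosen counter; its argument semantics are
 * taken as-is from this driver.
 */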
static void thead_pmu_del(struct perf_event *event, int flags)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(thead_pmu.hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long *used_mask = hw_events->used_mask;

	thead_pmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[hwc->idx] = NULL;
	if (hwc->config_base != EVENT_NONE) {
		clear_bit(hwc->idx, used_mask);
		hwc->idx = -1;
	}

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int thead_pmu_add(struct perf_event *event, int flags)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(thead_pmu.hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long *used_mask = hw_events->used_mask;
	int idx;

	if (hwc->config_base != EVENT_NONE) {
		set_bit(0, used_mask);
		set_bit(1, used_mask);
		set_bit(2, used_mask);
		idx = find_first_zero_bit(used_mask, MAX_COUNTERS);
		if (idx == MAX_COUNTERS)
			return -EAGAIN;
		set_bit(idx, used_mask);

		sbi_ecall(0x09000001, 0, 2, idx, hwc->config_base, 0, 0, 0);

		hwc->idx = idx;
	}

	hw_events->events[hwc->idx] = event;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		thead_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static irqreturn_t thead_pmu_handle_irq(int irq, void *dev_id)
{
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(thead_pmu.hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Did an overflow occur?
	 */
	if (!csr_read(SCOUNTEROF))
		return IRQ_NONE;

	/* Inhibit all counters while the overflows are processed. */
	csr_write(SCOUNTINHIBIT, UINT_MAX);

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < MAX_COUNTERS; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!(csr_read(SCOUNTEROF) & BIT(idx)))
			continue;

		hwc = &event->hw;
		thead_perf_event_update(event, &event->hw);
		perf_sample_data_init(&data, 0, hwc->last_period);
		thead_pmu_event_set_period(event);

		if (perf_event_overflow(event, &data, regs))
			thead_pmu_stop_event(event);
	}

	/* Resume counting. */
	csr_write(SCOUNTINHIBIT, 0);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
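
/*
 * The overflow interrupt is a per-CPU interrupt: it is requested once here
 * and then enabled/disabled on each CPU from the hotplug callbacks below.
 * Note that free_percpu_irq() must be given the same percpu dev_id cookie
 * that was passed to request_percpu_irq().
 */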
static int thead_pmu_request_irq(irq_handler_t handler)
{
	int err, irqs;
	struct platform_device *pmu_device = thead_pmu.plat_device;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	thead_pmu.irq = platform_get_irq(pmu_device, 0);
	if (thead_pmu.irq < 0)
		return -ENODEV;

	err = request_percpu_irq(thead_pmu.irq, handler, "c9xx-pmu-v1",
				 thead_pmu.hw_events);
	if (err) {
		pr_err("unable to request IRQ%d for c9xx PMU counters\n",
		       thead_pmu.irq);
		return err;
	}

	return 0;
}

static void thead_pmu_free_irq(void)
{
	int irq;
	struct platform_device *pmu_device = thead_pmu.plat_device;

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0)
		free_percpu_irq(irq, thead_pmu.hw_events);
}

static int init_hw_perf_events(void)
{
	thead_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
					       GFP_KERNEL);
	if (!thead_pmu.hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		return -ENOMEM;
	}

	thead_pmu.pmu = (struct pmu) {
		.pmu_enable	= thead_pmu_enable,
		.pmu_disable	= thead_pmu_disable,
		.event_init	= thead_pmu_event_init,
		.add		= thead_pmu_add,
		.del		= thead_pmu_del,
		.start		= thead_pmu_start,
		.stop		= thead_pmu_stop,
		.read		= thead_pmu_read,
	};

	return 0;
}
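
/*
 * CPU hotplug callbacks. The sbi_ecall() on the online path appears to use
 * the same T-HEAD vendor SBI extension (EID 0x09000001) to set up counting
 * on the incoming hart before the per-CPU overflow interrupt is unmasked;
 * the exact meaning of its arguments is taken on trust from this driver.
 */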
static int thead_pmu_starting_cpu(unsigned int cpu)
{
	sbi_ecall(0x09000001, 0, 1, 0, 0, 0, 0, 0);
	enable_percpu_irq(thead_pmu.irq, 0);
	return 0;
}

static int thead_pmu_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(thead_pmu.irq);
	return 0;
}

static int thead_pmu_device_probe(struct platform_device *pdev,
				  const struct of_device_id *of_table)
{
	int ret;

	ret = init_hw_perf_events();
	if (ret) {
		pr_notice("[perf] failed to probe PMU!\n");
		return ret;
	}
	thead_pmu.max_period = ULONG_MAX;
	thead_pmu.plat_device = pdev;

	ret = thead_pmu_request_irq(thead_pmu_handle_irq);
	if (ret) {
		thead_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pr_notice("[perf] PMU request irq fail!\n");
	}

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE + 1, "perf thead:online",
				thead_pmu_starting_cpu,
				thead_pmu_dying_cpu);
	if (ret) {
		thead_pmu_free_irq();
		free_percpu(thead_pmu.hw_events);
		return ret;
	}

	ret = perf_pmu_register(&thead_pmu.pmu, "thead_xt_pmu", PERF_TYPE_RAW);
	if (ret) {
		thead_pmu_free_irq();
		free_percpu(thead_pmu.hw_events);
		return ret;
	}

	pr_notice("[perf] T-HEAD C900 PMU probed\n");

	return 0;
}

static const struct of_device_id thead_pmu_of_device_ids[] = {
	{ .compatible = "thead,c900_pmu" },
	{},
};

static int thead_pmu_dev_probe(struct platform_device *pdev)
{
	return thead_pmu_device_probe(pdev, thead_pmu_of_device_ids);
}

static struct platform_driver thead_pmu_driver = {
	.driver = {
		.name		= "thead_c900_pmu",
		.of_match_table	= thead_pmu_of_device_ids,
	},
	.probe = thead_pmu_dev_probe,
};

int __init thead_c900_pmu_probe(void)
{
	return platform_driver_register(&thead_pmu_driver);
}
device_initcall(thead_c900_pmu_probe);