qcom_l2_pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>

#include <soc/qcom/kryo-l2-accessors.h>

#define MAX_L2_CTRS             9

#define L2PMCR_NUM_EV_SHIFT     11
#define L2PMCR_NUM_EV_MASK      0x1F

#define L2PMCR                  0x400
#define L2PMCNTENCLR            0x403
#define L2PMCNTENSET            0x404
#define L2PMINTENCLR            0x405
#define L2PMINTENSET            0x406
#define L2PMOVSCLR              0x407
#define L2PMOVSSET              0x408
#define L2PMCCNTCR              0x409
#define L2PMCCNTR               0x40A
#define L2PMCCNTSR              0x40C
#define L2PMRESR                0x410
#define IA_L2PMXEVCNTCR_BASE    0x420
#define IA_L2PMXEVCNTR_BASE     0x421
#define IA_L2PMXEVFILTER_BASE   0x423
#define IA_L2PMXEVTYPER_BASE    0x424

#define IA_L2_REG_OFFSET        0x10

#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003

#define L2EVTYPER_REG_SHIFT     3

#define L2PMRESR_GROUP_BITS     8
#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)

#define L2CYCLE_CTR_BIT         31
#define L2CYCLE_CTR_RAW_CODE    0xFE

#define L2PMCR_RESET_ALL        0x6
#define L2PMCR_COUNTERS_ENABLE  0x1
#define L2PMCR_COUNTERS_DISABLE 0x0

#define L2PMRESR_EN             BIT_ULL(63)

#define L2_EVT_MASK             0x00000FFF
#define L2_EVT_CODE_MASK        0x00000FF0
#define L2_EVT_GRP_MASK         0x0000000F
#define L2_EVT_CODE_SHIFT       4
#define L2_EVT_GRP_SHIFT        0
#define L2_EVT_CODE(event)      (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event)     (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)
#define L2_EVT_GROUP_MAX        7

#define L2_COUNTER_RELOAD       BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)

#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
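
/*
 * Worked example (illustrative): for counter 2 the event-count register is
 * reg_idx(IA_L2PMXEVCNTR, 2) = (2 * 0x10) + 0x421 = 0x441, i.e. each counter's
 * indirect registers sit IA_L2_REG_OFFSET apart, starting at the _BASE address.
 */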

/*
 * Events
 */
#define L2_EVENT_CYCLES         0xfe
#define L2_EVENT_DCACHE_OPS     0x400
#define L2_EVENT_ICACHE_OPS     0x401
#define L2_EVENT_TLBI           0x402
#define L2_EVENT_BARRIERS       0x403
#define L2_EVENT_TOTAL_READS    0x405
#define L2_EVENT_TOTAL_WRITES   0x406
#define L2_EVENT_TOTAL_REQUESTS 0x407
#define L2_EVENT_LDREX          0x420
#define L2_EVENT_STREX          0x421
#define L2_EVENT_CLREX          0x422

struct cluster_pmu;

/*
 * Aggregate PMU. Implements the core pmu functions and manages
 * the hardware PMUs.
 */
struct l2cache_pmu {
        struct hlist_node node;
        u32 num_pmus;
        struct pmu pmu;
        int num_counters;
        cpumask_t cpumask;
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster;
        struct list_head clusters;
};

/*
 * The cache is made up of one or more clusters, each cluster has its own PMU.
 * Each cluster is associated with one or more CPUs.
 * This structure represents one of the hardware PMUs.
 *
 * Events can be envisioned as a 2-dimensional array. Each column represents
 * a group of events. There are 8 groups. Only one entry from each
 * group can be in use at a time.
 *
 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 * the code (array row) and G specifies the group (column).
 *
 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 * which is outside the above scheme.
 */
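/*
 * Illustrative example of the 0xCCG encoding above: L2_EVENT_TOTAL_READS
 * (0x405) has L2_EVT_CODE(0x405) = 0x40 (the row) and L2_EVT_GROUP(0x405) = 5
 * (the column), so it cannot be counted at the same time as another event
 * whose config also selects group 5.
 */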
struct cluster_pmu {
        struct list_head next;
        struct perf_event *events[MAX_L2_CTRS];
        struct l2cache_pmu *l2cache_pmu;
        DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
        DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
        int irq;
        int cluster_id;
        /* The CPU that is used for collecting events on this cluster */
        int on_cpu;
        /* All the CPUs associated with this cluster */
        cpumask_t cluster_cpus;
        spinlock_t pmu_lock;
};

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

static inline u32 idx_to_reg_bit(u32 idx)
{
        if (idx == l2_cycle_ctr_idx)
                return BIT(L2CYCLE_CTR_BIT);

        return BIT(idx);
}

static inline struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

static void cluster_pmu_reset(void)
{
        /* Reset all counters */
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}

static inline void cluster_pmu_enable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}

static inline void cluster_pmu_disable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}

static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
        if (idx == l2_cycle_ctr_idx)
                kryo_l2_set_indirect_reg(L2PMCCNTR, value);
        else
                kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}

static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
        u64 value;

        if (idx == l2_cycle_ctr_idx)
                value = kryo_l2_get_indirect_reg(L2PMCCNTR);
        else
                value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

        return value;
}

static inline void cluster_pmu_counter_enable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_set_evccntcr(u32 val)
{
        kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}

static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}

static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}
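
/*
 * Program the event code for one event group into L2PMRESR. Each group owns
 * an 8-bit (L2PMRESR_GROUP_BITS) field in the register, so the update is a
 * read-modify-write of that field under pmu_lock, with the global enable bit
 * (L2PMRESR_EN) kept set.
 */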
static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                                 u32 event_group, u32 event_cc)
{
        u64 field;
        u64 resr_val;
        u32 shift;
        unsigned long flags;

        shift = L2PMRESR_GROUP_BITS * event_group;
        field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

        spin_lock_irqsave(&cluster->pmu_lock, flags);

        resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
        resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
        resr_val |= field;
        resr_val |= L2PMRESR_EN;
        kryo_l2_set_indirect_reg(L2PMRESR, resr_val);

        spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}

/*
 * Hardware allows filtering of events based on the originating
 * CPU. Turn this off by setting filter bits to allow events from
 * all CPUS, subunits and ID independent events in this cluster.
 */
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
        u32 val = L2PMXEVFILTER_SUFILTER_ALL |
                  L2PMXEVFILTER_ORGFILTER_IDINDEP |
                  L2PMXEVFILTER_ORGFILTER_ALL;

        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}
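
/*
 * Read the overflow status register and clear any bits that were set, so a
 * single call both reports and acknowledges pending counter overflows.
 */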
static inline u32 cluster_pmu_getreset_ovsr(void)
{
        u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

        kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
        return result;
}

static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
        return !!(ovsr & l2_counter_present_mask);
}

static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
        return !!(ovsr & idx_to_reg_bit(idx));
}
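
/*
 * Fold the current hardware count into event->count. The cmpxchg loop makes
 * the update of prev_count safe against a concurrent update of the same event
 * (for example from the overflow interrupt handler).
 */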
static void l2_cache_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev, now;
        u32 idx = hwc->idx;

        do {
                prev = local64_read(&hwc->prev_count);
                now = cluster_pmu_counter_get_value(idx);
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        /*
         * The cycle counter is 64-bit, but all other counters are
         * 32-bit, and we must handle 32-bit overflow explicitly.
         */
        delta = now - prev;
        if (idx != l2_cycle_ctr_idx)
                delta &= 0xffffffff;

        local64_add(delta, &event->count);
}

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                        struct hw_perf_event *hwc)
{
        u32 idx = hwc->idx;
        u64 new;

        /*
         * We limit the max period to half the max counter value so
         * that even in the case of extreme interrupt latency the
         * counter will (hopefully) not wrap past its initial value.
         */
        if (idx == l2_cycle_ctr_idx)
                new = L2_CYCLE_COUNTER_RELOAD;
        else
                new = L2_COUNTER_RELOAD;

        local64_set(&hwc->prev_count, new);
        cluster_pmu_counter_set_value(idx, new);
}

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                  struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        unsigned int group;

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                        return -EAGAIN;

                return l2_cycle_ctr_idx;
        }

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                /* The counters are all in use. */
                return -EAGAIN;

        /*
         * Check for column exclusion: event column already in use by another
         * event. This is for events which are not in the same group.
         * Conflicting events in the same group are detected in event_init.
         */
        group = L2_EVT_GROUP(hwc->config_base);
        if (test_bit(group, cluster->used_groups))
                return -EAGAIN;

        set_bit(idx, cluster->used_counters);
        set_bit(group, cluster->used_groups);

        return idx;
}

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                     struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cluster->used_counters);
        if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
                clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}
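
/*
 * Per-cluster overflow interrupt handler: read-and-clear the overflow status,
 * then, for every active counter that overflowed, accumulate its count and
 * re-arm it at its reload value.
 */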
static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
        struct cluster_pmu *cluster = data;
        int num_counters = cluster->l2cache_pmu->num_counters;
        u32 ovsr;
        int idx;

        ovsr = cluster_pmu_getreset_ovsr();
        if (!cluster_pmu_has_overflowed(ovsr))
                return IRQ_NONE;

        for_each_set_bit(idx, cluster->used_counters, num_counters) {
                struct perf_event *event = cluster->events[idx];
                struct hw_perf_event *hwc;

                if (WARN_ON_ONCE(!event))
                        continue;

                if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
                        continue;

                l2_cache_event_update(event);
                hwc = &event->hw;

                l2_cache_cluster_set_period(cluster, hwc);
        }

        return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void l2_cache_pmu_enable(struct pmu *pmu)
{
        /*
         * Although there is only one PMU (per socket) controlling multiple
         * physical PMUs (per cluster), because we do not support per-task mode
         * each event is associated with a CPU. Each event has pmu_enable
         * called on its CPU, so here it is only necessary to enable the
         * counters for the current CPU.
         */

        cluster_pmu_enable();
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
        cluster_pmu_disable();
}

static int l2_cache_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        struct perf_event *sibling;
        struct l2cache_pmu *l2cache_pmu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        l2cache_pmu = to_l2cache_pmu(event->pmu);

        if (hwc->sample_period) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
             ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
            (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Invalid config %llx\n",
                                    event->attr.config);
                return -EINVAL;
        }

        /* Don't allow groups with mixed PMUs, except for s/w events */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Can't create mixed PMU group\n");
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Can't create mixed PMU group\n");
                        return -EINVAL;
                }
        }

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                /* CPU has not been initialised */
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "CPU%d not associated with L2 cluster\n", event->cpu);
                return -EINVAL;
        }

        /* Ensure all events in a group are on the same cpu */
        if ((event->group_leader != event) &&
            (cluster->on_cpu != event->group_leader->cpu)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Can't create group on CPUs %d and %d",
                                    event->cpu, event->group_leader->cpu);
                return -EINVAL;
        }

        if ((event != event->group_leader) &&
            !is_software_event(event->group_leader) &&
            (L2_EVT_GROUP(event->group_leader->attr.config) ==
             L2_EVT_GROUP(event->attr.config))) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Column exclusion: conflicting events %llx %llx\n",
                                    event->group_leader->attr.config,
                                    event->attr.config);
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if ((sibling != event) &&
                    !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
                     L2_EVT_GROUP(event->attr.config))) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                            "Column exclusion: conflicting events %llx %llx\n",
                                            sibling->attr.config,
                                            event->attr.config);
                        return -EINVAL;
                }
        }

        hwc->idx = -1;
        hwc->config_base = event->attr.config;

        /*
         * Ensure all events are on the same cpu so all events are in the
         * same cpu context, to avoid races on pmu_enable etc.
         */
        event->cpu = cluster->on_cpu;

        return 0;
}
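
/*
 * Start counting for one event: arm the counter at its reload value and, for
 * non-cycle events, program the event type, the per-group event code in
 * L2PMRESR and the CPU/subunit filter before enabling the counter and its
 * overflow interrupt.
 */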
static void l2_cache_event_start(struct perf_event *event, int flags)
{
        struct cluster_pmu *cluster;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 config;
        u32 event_cc, event_group;

        hwc->state = 0;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_cluster_set_period(cluster, hwc);

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                cluster_pmu_set_evccntcr(0);
        } else {
                config = hwc->config_base;
                event_cc = L2_EVT_CODE(config);
                event_group = L2_EVT_GROUP(config);

                cluster_pmu_set_evcntcr(idx, 0);
                cluster_pmu_set_evtyper(idx, event_group);
                cluster_pmu_set_resr(cluster, event_group, event_cc);
                cluster_pmu_set_evfilter_sys_mode(idx);
        }

        cluster_pmu_counter_enable_interrupt(idx);
        cluster_pmu_counter_enable(idx);
}

static void l2_cache_event_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (hwc->state & PERF_HES_STOPPED)
                return;

        cluster_pmu_counter_disable_interrupt(idx);
        cluster_pmu_counter_disable(idx);

        if (flags & PERF_EF_UPDATE)
                l2_cache_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int l2_cache_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
        struct cluster_pmu *cluster;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        idx = l2_cache_get_event_idx(cluster, event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        cluster->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        if (flags & PERF_EF_START)
                l2_cache_event_start(event, flags);

        /* Propagate changes to the userspace mapping. */
        perf_event_update_userpage(event);

        return err;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx = hwc->idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
        cluster->events[idx] = NULL;
        l2_cache_clear_event_idx(cluster, event);

        perf_event_update_userpage(event);
}

static void l2_cache_event_read(struct perf_event *event)
{
        l2_cache_event_update(event);
}

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}

static struct device_attribute l2_cache_pmu_cpumask_attr =
                __ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);

static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
        &l2_cache_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group l2_cache_pmu_cpumask_group = {
        .attrs = l2_cache_pmu_cpumask_attrs,
};

/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code, "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
PMU_FORMAT_ATTR(event, "config:0-11");

static struct attribute *l2_cache_pmu_formats[] = {
        &format_attr_l2_code.attr,
        &format_attr_l2_group.attr,
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group l2_cache_pmu_format_group = {
        .name = "format",
        .attrs = l2_cache_pmu_formats,
};

static ssize_t l2cache_pmu_event_show(struct device *dev,
                                      struct device_attribute *attr, char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
        return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L2CACHE_EVENT_ATTR(_name, _id)                                        \
        (&((struct perf_pmu_events_attr[]) {                                  \
                { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL),  \
                  .id = _id, }                                                \
        })[0].attr.attr)

static struct attribute *l2_cache_pmu_events[] = {
        L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
        L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
        L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
        L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
        L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
        L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
        L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
        L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
        L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
        L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
        L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
        NULL
};

static struct attribute_group l2_cache_pmu_events_group = {
        .name = "events",
        .attrs = l2_cache_pmu_events,
};

static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
        &l2_cache_pmu_format_group,
        &l2_cache_pmu_cpumask_group,
        &l2_cache_pmu_events_group,
        NULL,
};
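
/*
 * Illustrative usage (not part of this file): the attribute groups above
 * expose the named events and the raw config format under
 * /sys/bus/event_source/devices/l2cache_0/, so a system-wide counting
 * session might look like:
 *
 *   perf stat -a -e l2cache_0/total-reads/ -- sleep 1
 *   perf stat -a -e l2cache_0/event=0x405/ -- sleep 1     (raw 0xCCG config)
 */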

/*
 * Generic device handlers
 */

static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
        { "QCOM8130", },
        { }
};

static int get_num_counters(void)
{
        int val;

        val = kryo_l2_get_indirect_reg(L2PMCR);

        /*
         * Read number of counters from L2PMCR and add 1
         * for the cycle counter.
         */
        return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
}

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        u64 mpidr;
        int cpu_cluster_id;
        struct cluster_pmu *cluster;

        /*
         * This assumes that the cluster_id is in MPIDR[aff1] for
         * single-threaded cores, and MPIDR[aff2] for multi-threaded
         * cores. This logic will have to be updated if this changes.
         */
        mpidr = read_cpuid_mpidr();
        if (mpidr & MPIDR_MT_BITMASK)
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
        else
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n", cpu,
                         cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }

        return NULL;
}
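
/*
 * CPU hotplug online callback: associate a newly-online CPU with its cluster
 * and, if no other CPU currently manages that cluster, make this CPU the
 * owner: reset the cluster PMU, steer the overflow IRQ to it and enable it.
 */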
static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come online */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster) {
                        /* Only if broken firmware doesn't list every cluster */
                        WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
                        return 0;
                }
        }

        /* If another CPU is managing this cluster, we're done */
        if (cluster->on_cpu != -1)
                return 0;

        /*
         * All CPUs on this cluster were down, use this one.
         * Reset to put it into sane state.
         */
        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
        cluster_pmu_reset();

        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
        enable_irq(cluster->irq);

        return 0;
}
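
/*
 * CPU hotplug offline callback: if the CPU going down owns its cluster, hand
 * ownership to another online CPU in the same cluster (migrating the perf
 * context and the IRQ affinity with it), or disable the IRQ if none is left.
 */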
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster)
                return 0;

        /* If this CPU is not managing the cluster, we're done */
        if (cluster->on_cpu != cpu)
                return 0;

        /* Give up ownership of cluster */
        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
        cluster->on_cpu = -1;

        /* Any other CPU for this cluster which is still online */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids) {
                disable_irq(cluster->irq);
                return 0;
        }

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

        return 0;
}
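
/*
 * Probe one per-cluster ACPI child device: derive the cluster id from the
 * ACPI _UID, allocate and register the cluster, and request its overflow IRQ
 * (left disabled until a CPU from the cluster comes online).
 */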
static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct platform_device *pdev = to_platform_device(dev->parent);
        struct platform_device *sdev = to_platform_device(dev);
        struct l2cache_pmu *l2cache_pmu = data;
        struct cluster_pmu *cluster;
        struct acpi_device *device;
        unsigned long fw_cluster_id;
        int err;
        int irq;

        if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
                return -ENODEV;

        if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) {
                dev_err(&pdev->dev, "unable to read ACPI uid\n");
                return -ENODEV;
        }

        cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        list_add(&cluster->next, &l2cache_pmu->clusters);
        cluster->cluster_id = fw_cluster_id;

        irq = platform_get_irq(sdev, 0);
        if (irq < 0)
                return irq;
        irq_set_status_flags(irq, IRQ_NOAUTOEN);
        cluster->irq = irq;

        cluster->l2cache_pmu = l2cache_pmu;
        cluster->on_cpu = -1;

        err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
                               "l2-cache-pmu", cluster);
        if (err) {
                dev_err(&pdev->dev,
                        "Unable to request IRQ%d for L2 PMU counters\n", irq);
                return err;
        }

        dev_info(&pdev->dev,
                 "Registered L2 cache PMU cluster %ld\n", fw_cluster_id);

        spin_lock_init(&cluster->pmu_lock);

        l2cache_pmu->num_pmus++;

        return 0;
}

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
        int err;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu =
                devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
        if (!l2cache_pmu)
                return -ENOMEM;

        INIT_LIST_HEAD(&l2cache_pmu->clusters);

        platform_set_drvdata(pdev, l2cache_pmu);
        l2cache_pmu->pmu = (struct pmu) {
                /* suffix is instance id for future use with multiple sockets */
                .name         = "l2cache_0",
                .task_ctx_nr  = perf_invalid_context,
                .pmu_enable   = l2_cache_pmu_enable,
                .pmu_disable  = l2_cache_pmu_disable,
                .event_init   = l2_cache_event_init,
                .add          = l2_cache_event_add,
                .del          = l2_cache_event_del,
                .start        = l2_cache_event_start,
                .stop         = l2_cache_event_stop,
                .read         = l2_cache_event_read,
                .attr_groups  = l2_cache_pmu_attr_grps,
                .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
        };

        l2cache_pmu->num_counters = get_num_counters();
        l2cache_pmu->pdev = pdev;
        l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
                                                     struct cluster_pmu *);
        if (!l2cache_pmu->pmu_cluster)
                return -ENOMEM;

        l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
        l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
                BIT(L2CYCLE_CTR_BIT);

        cpumask_clear(&l2cache_pmu->cpumask);

        /* Read cluster info and initialize each cluster */
        err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                    l2_cache_pmu_probe_cluster);
        if (err)
                return err;

        if (l2cache_pmu->num_pmus == 0) {
                dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
                return -ENODEV;
        }

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                       &l2cache_pmu->node);
        if (err) {
                dev_err(&pdev->dev, "Error %d registering hotplug", err);
                return err;
        }

        err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
        if (err) {
                dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
                goto out_unregister;
        }

        dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
                 l2cache_pmu->num_pmus);

        return err;

out_unregister:
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return err;
}

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu =
                to_l2cache_pmu(platform_get_drvdata(pdev));

        perf_pmu_unregister(&l2cache_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return 0;
}

static struct platform_driver l2_cache_pmu_driver = {
        .driver = {
                .name = "qcom-l2cache-pmu",
                .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
                .suppress_bind_attrs = true,
        },
        .probe = l2_cache_pmu_probe,
        .remove = l2_cache_pmu_remove,
};

static int __init register_l2_cache_pmu_driver(void)
{
        int err;

        err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                      "AP_PERF_ARM_QCOM_L2_ONLINE",
                                      l2cache_pmu_online_cpu,
                                      l2cache_pmu_offline_cpu);
        if (err)
                return err;

        return platform_driver_register(&l2_cache_pmu_driver);
}
device_initcall(register_l2_cache_pmu_driver);