fsl_imx8_ddr_perf.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20
#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};
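
/*
 * Usage sketch (not part of the driver; the instance name imx8_ddr0 is an
 * assumption, paths follow the standard perf sysfs layout): the capability
 * flags exported by the "caps" group above can be read with, e.g.,
 *
 *   cat /sys/bus/event_source/devices/imx8_ddr0/caps/filter
 *   cat /sys/bus/event_source/devices/imx8_ddr0/caps/enhanced_filter
 *
 * each of which prints 1 if the corresponding quirk is set and 0 otherwise.
 */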

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	NULL,
};
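
/*
 * Usage sketch (illustrative only; the instance name imx8_ddr0 and the AXI
 * ID value are assumptions): with the "events" and "format" groups above,
 * the perf tool can program these counters directly, e.g.
 *
 *   perf stat -a -e imx8_ddr0/read-cycles/,imx8_ddr0/write-cycles/ sleep 1
 *   perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ sleep 1
 *
 * where the event code maps to config bits 0-7 and axi_id/axi_mask map to
 * config1 bits 0-15 and 16-31, as declared by the format attributes.
 */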

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter is
	 * dedicated to the cycle event and can't be used for any other
	 * event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts of DDR transactions for the
	 * axid-read and axid-write events when the PMU core supports
	 * the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;

	return readl_relaxed(base + counter * 4);
}
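
/*
 * Worked example of the register layout implied above: the per-counter
 * value registers are 32 bits wide at a stride of 4 bytes, so counter 2 is
 * normally read from base + COUNTER_READ + 2 * 4 = base + 0x28, or from
 * base + COUNTER_DPCR1 + 0x8 when the enhanced-filter path is taken.
 */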

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}
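
/*
 * Arithmetic illustration of the update above: the hardware counters are
 * 32 bits and the delta is masked to 32 bits, so a single wrap of the raw
 * value between two reads is still accounted correctly. For example, with
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x10,
 * delta = (0x10 - 0xfffffff0) & 0xFFFFFFFF = 0x20.
 */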

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: it must first see a 0 and
		 * then a 1 written to its CLEAR bit in order to be cleared.
		 * The other counters only need a 0 written to CLEAR, and the
		 * hardware then sets the bit back to 1 by itself. The enable
		 * sequence below is harmless for all counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}
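
/*
 * Worked example of the enable path above: for config 0x2a (read-cycles),
 * FIELD_PREP(CNTL_CSV_MASK, 0x2a) places the event code in bits 31:24, so
 * the second write is (0x2a << 24) | CNTL_EN | CNTL_CLEAR = 0x2a000006.
 */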

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}
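
/*
 * Filter illustration (the concrete AXI ID value is an assumption): if
 * userspace passes axi_id=0x10 and leaves axi_mask at 0, config1 is
 * 0x00000010 and the XOR with AXI_MASKING_REVERT above writes 0xffff0010
 * to DPCR1, so that (per the documented filter semantics) every AXI ID bit
 * participates in the match against 0x10.
 */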

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);
	ddr_perf_free_counter(pmu, counter);

	pmu->active_events--;
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* enable the cycle counter if the cycle event is not in the active event list */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
					EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER,
					false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module	      = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	      = ddr_perf_event_add,
			.del	      = ddr_perf_event_del,
			.start	      = ddr_perf_event_start,
			.stop	      = ddr_perf_event_stop,
			.read	      = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* all counters will stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);

	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name) {
		ret = -ENOMEM;
		goto cpuhp_state_err;
	}

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver = {
		.name = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe = ddr_perf_probe,
	.remove = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");