/**
 * @file op_model_xscale.c
 * XScale Performance Monitor Driver
 *
 * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
 * @remark Copyright 2000-2004 MontaVista Software Inc
 * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
 * @remark Copyright 2004 Intel Corporation
 * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
 * @remark Copyright 2004 OProfile Authors
 *
 * @remark Read the file COPYING
 *
 * @author Zwane Mwaikambo
 */

/* #define DEBUG */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/system.h>

#include "op_counter.h"
#include "op_arm_model.h"

#define PMU_ENABLE      0x001   /* Enable counters */
#define PMN_RESET       0x002   /* Reset event counters */
#define CCNT_RESET      0x004   /* Reset clock counter */
#define PMU_RESET       (CCNT_RESET | PMN_RESET)
#define PMU_CNT64       0x008   /* Make CCNT count every 64th cycle */

/* TODO do runtime detection */
#ifdef CONFIG_ARCH_IOP32X
#define XSCALE_PMU_IRQ  IRQ_IOP32X_CORE_PMU
#endif
#ifdef CONFIG_ARCH_IOP33X
#define XSCALE_PMU_IRQ  IRQ_IOP33X_CORE_PMU
#endif
#ifdef CONFIG_ARCH_PXA
#define XSCALE_PMU_IRQ  IRQ_PMU
#endif

/*
 * Different types of events that can be counted by the XScale PMU
 * as used by Oprofile userspace. Here primarily for documentation
 * purposes.
 */
#define EVT_ICACHE_MISS                 0x00
#define EVT_ICACHE_NO_DELIVER           0x01
#define EVT_DATA_STALL                  0x02
#define EVT_ITLB_MISS                   0x03
#define EVT_DTLB_MISS                   0x04
#define EVT_BRANCH                      0x05
#define EVT_BRANCH_MISS                 0x06
#define EVT_INSTRUCTION                 0x07
#define EVT_DCACHE_FULL_STALL           0x08
#define EVT_DCACHE_FULL_STALL_CONTIG    0x09
#define EVT_DCACHE_ACCESS               0x0A
#define EVT_DCACHE_MISS                 0x0B
#define EVT_DCACHE_WRITE_BACK           0x0C
#define EVT_PC_CHANGED                  0x0D
#define EVT_BCU_REQUEST                 0x10
#define EVT_BCU_FULL                    0x11
#define EVT_BCU_DRAIN                   0x12
#define EVT_BCU_ECC_NO_ELOG             0x14
#define EVT_BCU_1_BIT_ERR               0x15
#define EVT_RMW                         0x16
/* EVT_CCNT is not hardware defined */
#define EVT_CCNT                        0xFE
#define EVT_UNUSED                      0xFF

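/*
 * Per-counter bookkeeping: ovf counts overflow interrupts that have
 * not yet been turned into samples, and reset_counter is the period
 * the counter is reloaded with after each overflow.
 */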
struct pmu_counter {
        volatile unsigned long ovf;
        unsigned long reset_counter;
};

enum { CCNT, PMN0, PMN1, PMN2, PMN3, MAX_COUNTERS };

static struct pmu_counter results[MAX_COUNTERS];

/*
 * There are two versions of the PMU in current XScale processors
 * with differing register layouts and number of performance counters.
 * e.g. IOP32x is xsc1 whilst IOP33x is xsc2.
 * We detect which register layout to use in xscale_detect_pmu()
 */
enum { PMU_XSC1, PMU_XSC2 };

struct pmu_type {
        int id;
        char *name;
        int num_counters;
        unsigned int int_enable;
        unsigned int cnt_ovf[MAX_COUNTERS];
        unsigned int int_mask[MAX_COUNTERS];
};

static struct pmu_type pmu_parms[] = {
        {
                .id             = PMU_XSC1,
                .name           = "arm/xscale1",
                .num_counters   = 3,
                .int_mask       = { [PMN0] = 0x10, [PMN1] = 0x20,
                                    [CCNT] = 0x40 },
                .cnt_ovf        = { [CCNT] = 0x400, [PMN0] = 0x100,
                                    [PMN1] = 0x200 },
        },
        {
                .id             = PMU_XSC2,
                .name           = "arm/xscale2",
                .num_counters   = 5,
                .int_mask       = { [CCNT] = 0x01, [PMN0] = 0x02,
                                    [PMN1] = 0x04, [PMN2] = 0x08,
                                    [PMN3] = 0x10 },
                .cnt_ovf        = { [CCNT] = 0x01, [PMN0] = 0x02,
                                    [PMN1] = 0x04, [PMN2] = 0x08,
                                    [PMN3] = 0x10 },
        },
};

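/* Set by xscale_detect_pmu() to the pmu_parms entry for the running core */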
static struct pmu_type *pmu;
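
/*
 * The PMU is accessed through coprocessor 14. On xsc1 the control
 * register (PMNC) sits at CRn=c0/CRm=c0 and also carries the
 * interrupt-enable and overflow bits; xsc2 moved PMNC to CRn=c0/CRm=c1
 * and split interrupt enables (c4,c1) and overflow flags (c5,c1) into
 * registers of their own, as used further below.
 */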
static void write_pmnc(u32 val)
{
        if (pmu->id == PMU_XSC1) {
                /* upper 4bits and 7, 11 are write-as-0 */
                val &= 0xffff77f;
                __asm__ __volatile__ ("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
        } else {
                /* bits 4-23 are write-as-0, 24-31 are write ignored */
                val &= 0xf;
                __asm__ __volatile__ ("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
        }
}

static u32 read_pmnc(void)
{
        u32 val;

        if (pmu->id == PMU_XSC1)
                __asm__ __volatile__ ("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
        else {
                __asm__ __volatile__ ("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
                /* bits 1-2 and 4-23 are read-unpredictable */
                val &= 0xff000009;
        }

        return val;
}

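/*
 * Raw counter accessors. On xsc1 the three counters sit at
 * CRn=c1..c3/CRm=c0; on xsc2 CCNT lives at c1,c1 and PMN0-PMN3 at
 * CRn=c0..c3/CRm=c2.
 */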
static u32 __xsc1_read_counter(int counter)
{
        u32 val = 0;

        switch (counter) {
        case CCNT:
                __asm__ __volatile__ ("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
                break;
        case PMN0:
                __asm__ __volatile__ ("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
                break;
        case PMN1:
                __asm__ __volatile__ ("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
                break;
        }

        return val;
}

static u32 __xsc2_read_counter(int counter)
{
        u32 val = 0;

        switch (counter) {
        case CCNT:
                __asm__ __volatile__ ("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
                break;
        case PMN0:
                __asm__ __volatile__ ("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
                break;
        case PMN1:
                __asm__ __volatile__ ("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
                break;
        case PMN2:
                __asm__ __volatile__ ("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
                break;
        case PMN3:
                __asm__ __volatile__ ("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
                break;
        }

        return val;
}

static u32 read_counter(int counter)
{
        u32 val;

        if (pmu->id == PMU_XSC1)
                val = __xsc1_read_counter(counter);
        else
                val = __xsc2_read_counter(counter);

        return val;
}

static void __xsc1_write_counter(int counter, u32 val)
{
        switch (counter) {
        case CCNT:
                __asm__ __volatile__ ("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
                break;
        case PMN0:
                __asm__ __volatile__ ("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
                break;
        case PMN1:
                __asm__ __volatile__ ("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
                break;
        }
}

static void __xsc2_write_counter(int counter, u32 val)
{
        switch (counter) {
        case CCNT:
                __asm__ __volatile__ ("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
                break;
        case PMN0:
                __asm__ __volatile__ ("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
                break;
        case PMN1:
                __asm__ __volatile__ ("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
                break;
        case PMN2:
                __asm__ __volatile__ ("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
                break;
        case PMN3:
                __asm__ __volatile__ ("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
                break;
        }
}

static void write_counter(int counter, u32 val)
{
        if (pmu->id == PMU_XSC1)
                __xsc1_write_counter(counter, val);
        else
                __xsc2_write_counter(counter, val);
}

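/*
 * Program the event selectors from counter_config[] and preload each
 * enabled counter with -(count), so that it overflows (and raises the
 * PMU interrupt) after 'count' events.
 */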
static int xscale_setup_ctrs(void)
{
        u32 evtsel, pmnc;
        int i;

        for (i = CCNT; i < MAX_COUNTERS; i++) {
                if (counter_config[i].enabled)
                        continue;

                counter_config[i].event = EVT_UNUSED;
        }

        switch (pmu->id) {
        case PMU_XSC1:
                pmnc = (counter_config[PMN1].event << 20) | (counter_config[PMN0].event << 12);
                pr_debug("xscale_setup_ctrs: pmnc: %#08x\n", pmnc);
                write_pmnc(pmnc);
                break;

        case PMU_XSC2:
                evtsel = counter_config[PMN0].event | (counter_config[PMN1].event << 8) |
                        (counter_config[PMN2].event << 16) | (counter_config[PMN3].event << 24);

                pr_debug("xscale_setup_ctrs: evtsel %#08x\n", evtsel);
                __asm__ __volatile__ ("mcr p14, 0, %0, c8, c1, 0" : : "r" (evtsel));
                break;
        }

        for (i = CCNT; i < MAX_COUNTERS; i++) {
                if (counter_config[i].event == EVT_UNUSED) {
                        counter_config[i].event = 0;
                        pmu->int_enable &= ~pmu->int_mask[i];
                        continue;
                }

                results[i].reset_counter = counter_config[i].count;
                write_counter(i, -(u32)counter_config[i].count);
                pmu->int_enable |= pmu->int_mask[i];
                pr_debug("xscale_setup_ctrs: counter%d %#08x from %#08lx\n", i,
                        read_counter(i), counter_config[i].count);
        }

        return 0;
}

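/*
 * Per-variant overflow detection. Both variants first clear PMU_ENABLE
 * so the counters stop while the overflow flags are collected; writing
 * the flag bits back clears them.
 */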
static inline void __xsc1_check_ctrs(void)
{
        int i;
        u32 pmnc = read_pmnc();

        /*
         * NOTE: there's an A stepping errata that states if an overflow
         * bit already exists and another occurs, the previous
         * Overflow bit gets cleared. There's no workaround.
         * Fixed in B stepping or later.
         */

        /*
         * Write the value back to clear the overflow flags. Overflow
         * flags remain in pmnc for use below.
         */
        write_pmnc(pmnc & ~PMU_ENABLE);

        for (i = CCNT; i <= PMN1; i++) {
                if (!(pmu->int_mask[i] & pmu->int_enable))
                        continue;

                if (pmnc & pmu->cnt_ovf[i])
                        results[i].ovf++;
        }
}

static inline void __xsc2_check_ctrs(void)
{
        int i;
        u32 flag = 0, pmnc = read_pmnc();

        pmnc &= ~PMU_ENABLE;
        write_pmnc(pmnc);

        /* read overflow flag register */
        __asm__ __volatile__ ("mrc p14, 0, %0, c5, c1, 0" : "=r" (flag));

        for (i = CCNT; i <= PMN3; i++) {
                if (!(pmu->int_mask[i] & pmu->int_enable))
                        continue;

                if (flag & pmu->cnt_ovf[i])
                        results[i].ovf++;
        }

        /* writeback clears overflow bits */
        __asm__ __volatile__ ("mcr p14, 0, %0, c5, c1, 0" : : "r" (flag));
}

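/*
 * PMU interrupt handler: latch which counters overflowed, emit one
 * oprofile sample per pending overflow, reload each counter with its
 * period, then re-enable the PMU.
 */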
static irqreturn_t xscale_pmu_interrupt(int irq, void *arg)
{
        int i;
        u32 pmnc;

        if (pmu->id == PMU_XSC1)
                __xsc1_check_ctrs();
        else
                __xsc2_check_ctrs();

        for (i = CCNT; i < MAX_COUNTERS; i++) {
                if (!results[i].ovf)
                        continue;

                write_counter(i, -(u32)results[i].reset_counter);
                oprofile_add_sample(get_irq_regs(), i);
                results[i].ovf--;
        }

        pmnc = read_pmnc() | PMU_ENABLE;
        write_pmnc(pmnc);

        return IRQ_HANDLED;
}

static void xscale_pmu_stop(void)
{
        u32 pmnc = read_pmnc();

        pmnc &= ~PMU_ENABLE;
        write_pmnc(pmnc);

        free_irq(XSCALE_PMU_IRQ, results);
}

static int xscale_pmu_start(void)
{
        int ret;
        u32 pmnc = read_pmnc();

        ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, IRQF_DISABLED,
                        "XScale PMU", (void *)results);

        if (ret < 0) {
                printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n",
                        XSCALE_PMU_IRQ);
                return ret;
        }

        if (pmu->id == PMU_XSC1)
                pmnc |= pmu->int_enable;
        else {
                __asm__ __volatile__ ("mcr p14, 0, %0, c4, c1, 0" : : "r" (pmu->int_enable));
                pmnc &= ~PMU_CNT64;
        }

        pmnc |= PMU_ENABLE;
        write_pmnc(pmnc);
        pr_debug("xscale_pmu_start: pmnc: %#08x mask: %08x\n", pmnc, pmu->int_enable);
        return 0;
}

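/*
 * Identify the PMU variant from the core generation field, bits 15:13
 * of the main ID register: 1 selects the xsc1 layout, 2 the xsc2
 * layout; anything else is unsupported.
 */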
static int xscale_detect_pmu(void)
{
        int ret = 0;
        u32 id;

        id = (read_cpuid(CPUID_ID) >> 13) & 0x7;

        switch (id) {
        case 1:
                pmu = &pmu_parms[PMU_XSC1];
                break;
        case 2:
                pmu = &pmu_parms[PMU_XSC2];
                break;
        default:
                ret = -ENODEV;
                break;
        }

        if (!ret) {
                op_xscale_spec.name = pmu->name;
                op_xscale_spec.num_counters = pmu->num_counters;
                pr_debug("xscale_detect_pmu: detected %s PMU\n", pmu->name);
        }

        return ret;
}

struct op_arm_model_spec op_xscale_spec = {
        .init           = xscale_detect_pmu,
        .setup_ctrs     = xscale_setup_ctrs,
        .start          = xscale_pmu_start,
        .stop           = xscale_pmu_stop,
};