cpc925_edac.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
 *
 * Copyright (c) 2008 Wind River Systems, Inc.
 *
 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>

#include "edac_module.h"

#define CPC925_EDAC_REVISION	" Ver: 1.0.0"
#define CPC925_EDAC_MOD_STR	"cpc925_edac"

#define cpc925_printk(level, fmt, arg...) \
	edac_printk(level, "CPC925", fmt, ##arg)

#define cpc925_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)

/*
 * CPC925 registers are 32 bits wide, with bit 0 being the most
 * significant bit and bit 31 the least significant.
 */
#define CPC925_BITS_PER_REG	32
#define CPC925_BIT(nr)		(1UL << (CPC925_BITS_PER_REG - 1 - nr))
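/* e.g. CPC925_BIT(0) == 0x80000000, CPC925_BIT(31) == 0x00000001 */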

/*
 * EDAC device names for error detection on the
 * CPU interface and HyperTransport link.
 */
#define CPC925_CPU_ERR_DEV	"cpu"
#define CPC925_HT_LINK_DEV	"htlink"

/* Assume the DDR refresh cycle is 15.6 microseconds */
#define CPC925_REF_FREQ		0xFA69
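/* 0xFA69 == 64105, roughly one second's worth of 15.6us refresh intervals */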
#define CPC925_SCRUB_BLOCK_SIZE	64	/* bytes */
#define CPC925_NR_CSROWS	8

/*
 * All register and bit definitions are taken from the
 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
 */

/*
 * CPU and Memory Controller Registers
 */
/************************************************************
 *	Processor Interface Exception Mask Register (APIMASK)
 ************************************************************/
#define REG_APIMASK_OFFSET	0x30070
enum apimask_bits {
	APIMASK_DART = CPC925_BIT(0), /* DART Exception */
	APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
	APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
	APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
	APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
	APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
	APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
	/* BIT(7) Reserved */
	APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
	APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
	APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
	APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */

	CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
			   APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
			   APIMASK_ADRS1),
	ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
			   APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
};
#define APIMASK_ADI(n)		CPC925_BIT(((n)+1))
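/* APIMASK_ADI(0) equals APIMASK_ADI0 and APIMASK_ADI(1) equals APIMASK_ADI1 */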

/************************************************************
 *	Processor Interface Exception Register (APIEXCP)
 ************************************************************/
#define REG_APIEXCP_OFFSET	0x30060
enum apiexcp_bits {
	APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
	APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
	APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
	APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
	APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
	APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
	APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
	/* BIT(7) Reserved */
	APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
	APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
	APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
	APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */

	CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
			     APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
			     APIEXCP_ADRS1),
	UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
	CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
	ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
};

/************************************************************
 *	Memory Bus Configuration Register (MBCR)
 ************************************************************/
#define REG_MBCR_OFFSET		0x2190
#define MBCR_64BITCFG_SHIFT	23
#define MBCR_64BITCFG_MASK	(1UL << MBCR_64BITCFG_SHIFT)
#define MBCR_64BITBUS_SHIFT	22
#define MBCR_64BITBUS_MASK	(1UL << MBCR_64BITBUS_SHIFT)

/************************************************************
 *	Memory Bank Mode Register (MBMR)
 ************************************************************/
#define REG_MBMR_OFFSET		0x21C0
#define MBMR_MODE_MAX_VALUE	0xF
#define MBMR_MODE_SHIFT		25
#define MBMR_MODE_MASK		(MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
#define MBMR_BBA_SHIFT		24
#define MBMR_BBA_MASK		(1UL << MBMR_BBA_SHIFT)

/************************************************************
 *	Memory Bank Boundary Address Register (MBBAR)
 ************************************************************/
#define REG_MBBAR_OFFSET	0x21D0
#define MBBAR_BBA_MAX_VALUE	0xFF
#define MBBAR_BBA_SHIFT		24
#define MBBAR_BBA_MASK		(MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)

/************************************************************
 *	Memory Scrub Control Register (MSCR)
 ************************************************************/
#define REG_MSCR_OFFSET		0x2400
#define MSCR_SCRUB_MOD_MASK	0xC0000000	/* scrub_mod - bit0:1 */
#define MSCR_BACKGR_SCRUB	0x40000000	/* 01 */
#define MSCR_SI_SHIFT		16		/* si - bit8:15 */
#define MSCR_SI_MAX_VALUE	0xFF
#define MSCR_SI_MASK		(MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)

/************************************************************
 *	Memory Scrub Range Start Register (MSRSR)
 ************************************************************/
#define REG_MSRSR_OFFSET	0x2410

/************************************************************
 *	Memory Scrub Range End Register (MSRER)
 ************************************************************/
#define REG_MSRER_OFFSET	0x2420

/************************************************************
 *	Memory Scrub Pattern Register (MSPR)
 ************************************************************/
#define REG_MSPR_OFFSET		0x2430

/************************************************************
 *	Memory Check Control Register (MCCR)
 ************************************************************/
#define REG_MCCR_OFFSET		0x2440
enum mccr_bits {
	MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
};

/************************************************************
 *	Memory Check Range End Register (MCRER)
 ************************************************************/
#define REG_MCRER_OFFSET	0x2450

/************************************************************
 *	Memory Error Address Register (MEAR)
 ************************************************************/
#define REG_MEAR_OFFSET		0x2460
#define MEAR_BCNT_MAX_VALUE	0x3
#define MEAR_BCNT_SHIFT		30
#define MEAR_BCNT_MASK		(MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
#define MEAR_RANK_MAX_VALUE	0x7
#define MEAR_RANK_SHIFT		27
#define MEAR_RANK_MASK		(MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
#define MEAR_COL_MAX_VALUE	0x7FF
#define MEAR_COL_SHIFT		16
#define MEAR_COL_MASK		(MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
#define MEAR_BANK_MAX_VALUE	0x3
#define MEAR_BANK_SHIFT		14
#define MEAR_BANK_MASK		(MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
#define MEAR_ROW_MASK		0x00003FFF

/************************************************************
 *	Memory Error Syndrome Register (MESR)
 ************************************************************/
#define REG_MESR_OFFSET		0x2470
#define MESR_ECC_SYN_H_MASK	0xFF00
#define MESR_ECC_SYN_L_MASK	0x00FF

/************************************************************
 *	Memory Mode Control Register (MMCR)
 ************************************************************/
#define REG_MMCR_OFFSET		0x2500
enum mmcr_bits {
	MMCR_REG_DIMM_MODE = CPC925_BIT(3),
};

/*
 * HyperTransport Link Registers
 */
/************************************************************
 *	Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
 ************************************************************/
#define REG_ERRCTRL_OFFSET	0x70140
enum errctrl_bits {			/* nonfatal interrupts for */
	ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
	ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
	ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
	ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
	ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
	ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */

	ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
	ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */

	HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
			     ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
			     ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
	HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
};

/************************************************************
 *	Link Configuration and Link Control Register (LINKCTRL)
 ************************************************************/
#define REG_LINKCTRL_OFFSET	0x70110
enum linkctrl_bits {
	LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
	LINKCTRL_LINK_FAIL = CPC925_BIT(27),

	HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
};

/************************************************************
 *	Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
 ************************************************************/
#define REG_LINKERR_OFFSET	0x70120
enum linkerr_bits {
	LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
	LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
	LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */

	HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
			       LINKERR_PROT_ERR),
};

/************************************************************
 *	Bridge Control Register (BRGCTRL)
 ************************************************************/
#define REG_BRGCTRL_OFFSET	0x70300
enum brgctrl_bits {
	BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
	BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
};

/* Private structure for edac memory controller */
struct cpc925_mc_pdata {
	void __iomem *vbase;
	unsigned long total_mem;
	const char *name;
	int edac_idx;
};

/* Private structure for common edac device */
struct cpc925_dev_info {
	void __iomem *vbase;
	struct platform_device *pdev;
	char *ctl_name;
	int edac_idx;
	struct edac_device_ctl_info *edac_dev;
	void (*init)(struct cpc925_dev_info *dev_info);
	void (*exit)(struct cpc925_dev_info *dev_info);
	void (*check)(struct edac_device_ctl_info *edac_dev);
};

/* Get total memory size from Open Firmware DTB */
static void get_total_mem(struct cpc925_mc_pdata *pdata)
{
	struct device_node *np = NULL;
	const unsigned int *reg, *reg_end;
	int len, sw, aw;
	unsigned long start, size;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	aw = of_n_addr_cells(np);
	sw = of_n_size_cells(np);
	reg = (const unsigned int *)of_get_property(np, "reg", &len);
	reg_end = reg + len/4;

	pdata->total_mem = 0;
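	/* Walk the (address, size) pairs in the "reg" property and sum the sizes */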
	do {
		start = of_read_number(reg, aw);
		reg += aw;
		size = of_read_number(reg, sw);
		reg += sw;
		edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
		pdata->total_mem += size;
	} while (reg < reg_end);

	of_node_put(np);
	edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
}

static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
	struct cpc925_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	enum dev_type dtype;
	int index, j;
	u32 mbmr, mbbar, bba, grain;
	unsigned long row_size, nr_pages, last_nr_pages = 0;

	get_total_mem(pdata);

	for (index = 0; index < mci->nr_csrows; index++) {
		mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
				   0x20 * index);
		mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
				    0x20 + index);
		bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
		      ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
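
		/*
		 * bba takes bit 8 from MBMR's BBA bit and bits 7:0 from
		 * MBBAR's BBA field; zero means this csrow is not populated.
		 */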
		if (bba == 0)
			continue; /* not populated */

		csrow = mci->csrows[index];

		row_size = bba * (1UL << 28); /* 256M */
		csrow->first_page = last_nr_pages;
		nr_pages = row_size >> PAGE_SHIFT;
		csrow->last_page = csrow->first_page + nr_pages - 1;
		last_nr_pages = csrow->last_page + 1;

		switch (csrow->nr_channels) {
		case 1: /* Single channel */
			grain = 32; /* four-beat burst of 32 bytes */
			break;
		case 2: /* Dual channel */
		default:
			grain = 64; /* four-beat burst of 64 bytes */
			break;
		}

		switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
		case 6: /* 0110, no way to differentiate X8 VS X16 */
		case 5: /* 0101 */
		case 8: /* 1000 */
			dtype = DEV_X16;
			break;
		case 7: /* 0111 */
		case 9: /* 1001 */
			dtype = DEV_X8;
			break;
		default:
			dtype = DEV_UNKNOWN;
			break;
		}

		for (j = 0; j < csrow->nr_channels; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->nr_pages = nr_pages / csrow->nr_channels;
			dimm->mtype = MEM_RDDR;
			dimm->edac_mode = EDAC_SECDED;
			dimm->grain = grain;
			dimm->dtype = dtype;
		}
	}
}

/* Enable memory controller ECC detection */
static void cpc925_mc_init(struct mem_ctl_info *mci)
{
	struct cpc925_mc_pdata *pdata = mci->pvt_info;
	u32 apimask;
	u32 mccr;

	/* Enable various ECC error exceptions */
	apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
	if ((apimask & ECC_MASK_ENABLE) == 0) {
		apimask |= ECC_MASK_ENABLE;
		__raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
	}

	/* Enable ECC detection */
	mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
	if ((mccr & MCCR_ECC_EN) == 0) {
		mccr |= MCCR_ECC_EN;
		__raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
	}
}

/* Disable memory controller ECC detection */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
	/*
	 * WARNING:
	 * We are supposed to clear the ECC error detection bits here,
	 * and doing so would be no problem. However, once they are
	 * cleared, re-installing the CPC925 EDAC module later and
	 * setting them up again in cpc925_mc_init() will trigger a
	 * machine check exception.
	 * It is also fine to leave the ECC error detection bits enabled,
	 * since they are set to 1 by default or by the boot loader.
	 */
	return;
}

/*
 * Revert DDR column/row/bank addresses into page frame number and
 * offset in page.
 *
 * Suppose memory mode is 0x0111 (128-bit mode, identical DIMM pairs),
 * physical address (PA) bits to column address (CA) bits mappings are:
 *	CA  0   1   2   3   4   5   6   7   8   9   10
 *	PA  59  58  57  56  55  54  53  52  51  50  49
 *
 * physical address (PA) bits to bank address (BA) bits mappings are:
 *	BA  0   1
 *	PA  43  44
 *
 * physical address (PA) bits to row address (RA) bits mappings are:
 *	RA  0   1   2   3   4   5   6   7   8   9   10  11  12
 *	PA  36  35  34  48  47  46  45  40  41  42  39  38  37
 */
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
		unsigned long *pfn, unsigned long *offset, int *csrow)
{
	u32 bcnt, rank, col, bank, row;
	u32 c;
	unsigned long pa;
	int i;

	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	row = mear & MEAR_ROW_MASK;

	*csrow = rank;

#ifdef CONFIG_EDAC_DEBUG
	if (mci->csrows[rank]->first_page == 0) {
		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
			"non-populated csrow, broken hardware?\n");
		return;
	}
#endif

	/* Revert csrow number */
	pa = mci->csrows[rank]->first_page << PAGE_SHIFT;

	/* Revert column address */
	col += bcnt;
	for (i = 0; i < 11; i++) {
		c = col & 0x1;
		col >>= 1;
		pa |= c << (14 - i);
	}

	/* Revert bank address */
	pa |= bank << 19;

	/* Revert row address, in 4 steps */
	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (26 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (21 + i);
	}

	for (i = 0; i < 4; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (18 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (29 - i);
	}

	*offset = pa & (PAGE_SIZE - 1);
	*pfn = pa >> PAGE_SHIFT;

	edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
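
/*
 * Map the MESR syndrome to a channel: a clear upper syndrome byte is
 * treated as an error on channel 0, a clear lower byte as channel 1.
 */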
static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
{
	if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
		return 0;

	if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
		return 1;

	cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
			 syndrome);
	return 1;
}

/* Check memory controller registers for ECC errors */
static void cpc925_mc_check(struct mem_ctl_info *mci)
{
	struct cpc925_mc_pdata *pdata = mci->pvt_info;
	u32 apiexcp;
	u32 mear;
	u32 mesr;
	u16 syndrome;
	unsigned long pfn = 0, offset = 0;
	int csrow = 0, channel = 0;

	/* APIEXCP is cleared when read */
	apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
	if ((apiexcp & ECC_EXCP_DETECTED) == 0)
		return;

	mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
	syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);

	mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);

	/* Revert column/row addresses into page frame number, etc */
	cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);

	if (apiexcp & CECC_EXCP_DETECTED) {
		cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
		channel = cpc925_mc_find_channel(mci, syndrome);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, offset, syndrome,
				     csrow, channel, -1,
				     mci->ctl_name, "");
	}

	if (apiexcp & UECC_EXCP_DETECTED) {
		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, offset, 0,
				     csrow, -1, -1,
				     mci->ctl_name, "");
	}

	cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
	cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
		__raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
		apiexcp);
	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MSCR_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MSRER_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MSPR_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MCCR_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
		__raw_readl(pdata->vbase + REG_MCRER_OFFSET));
	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
		mesr);
	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
		syndrome);
}

/******************** CPU err device********************************/
static u32 cpc925_cpu_mask_disabled(void)
{
	struct device_node *cpunode;
	static u32 mask = 0;

	/* use cached value if available */
	if (mask != 0)
		return mask;

	mask = APIMASK_ADI0 | APIMASK_ADI1;

	for_each_of_cpu_node(cpunode) {
		const u32 *reg = of_get_property(cpunode, "reg", NULL);

		if (reg == NULL || *reg > 2) {
			cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
			continue;
		}

		mask &= ~APIMASK_ADI(*reg);
	}

	if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
		/* We assume that each CPU sits on its own PI and that
		 * for present CPUs the reg property equals the PI
		 * interface id */
		cpc925_printk(KERN_WARNING,
				"Assuming PI id is equal to CPU MPIC id!\n");
	}

	return mask;
}

/* Enable CPU error detection */
static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
{
	u32 apimask;
	u32 cpumask;

	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);

	cpumask = cpc925_cpu_mask_disabled();
	if (apimask & cpumask) {
		cpc925_printk(KERN_WARNING, "CPU(s) not present, "
				"but enabled in APIMASK, disabling\n");
		apimask &= ~cpumask;
	}

	if ((apimask & CPU_MASK_ENABLE) == 0)
		apimask |= CPU_MASK_ENABLE;

	__raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
}

/* Disable CPU error detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
	/*
	 * WARNING:
	 * We are supposed to clear the CPU error detection bits here,
	 * and doing so would be no problem. However, once they are
	 * cleared, re-installing the CPC925 EDAC module later and
	 * setting them up again in cpc925_cpu_init() will trigger a
	 * machine check exception.
	 * It is also fine to leave the CPU error detection bits enabled,
	 * since they are set to 1 by default.
	 */
	return;
}

/* Check for CPU errors */
static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	u32 apiexcp;
	u32 apimask;

	/* APIEXCP is cleared when read */
	apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
	if ((apiexcp & CPU_EXCP_DETECTED) == 0)
		return;

	if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
		return;

	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
	cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
			"Processor Interface register dump:\n");
	cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
	cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

/******************** HT Link err device****************************/
/* Enable HyperTransport Link Error detection */
static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
{
	u32 ht_errctrl;

	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
		ht_errctrl |= HT_ERRCTRL_ENABLE;
		__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
	}
}

/* Disable HyperTransport Link Error detection */
static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
{
	u32 ht_errctrl;

	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	ht_errctrl &= ~HT_ERRCTRL_ENABLE;
	__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
}

/* Check for HyperTransport Link errors */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);

	if (!((brgctrl & BRGCTRL_DETSERR) ||
	      (linkctrl & HT_LINKCTRL_DETECTED) ||
	      (errctrl & HT_ERRCTRL_DETECTED) ||
	      (linkerr & HT_LINKERR_DETECTED)))
		return;

	cpc925_printk(KERN_INFO, "HT Link Fault\n"
			"HT register dump:\n");
	cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
			brgctrl);
	cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
			linkctrl);
	cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
			errctrl);
	cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
			linkerr);

	/* Clear by writing 1 */
	if (brgctrl & BRGCTRL_DETSERR)
		__raw_writel(BRGCTRL_DETSERR,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (linkctrl & HT_LINKCTRL_DETECTED)
		__raw_writel(HT_LINKCTRL_DETECTED,
				dev_info->vbase + REG_LINKCTRL_OFFSET);

	/* Initiate Secondary Bus Reset to clear the chain failure */
	if (errctrl & ERRCTRL_CHN_FAL)
		__raw_writel(BRGCTRL_SECBUSRESET,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (errctrl & ERRCTRL_RSP_ERR)
		__raw_writel(ERRCTRL_RSP_ERR,
				dev_info->vbase + REG_ERRCTRL_OFFSET);

	if (linkerr & HT_LINKERR_DETECTED)
		__raw_writel(HT_LINKERR_DETECTED,
				dev_info->vbase + REG_LINKERR_OFFSET);

	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}

static struct cpc925_dev_info cpc925_devs[] = {
	{
		.ctl_name = CPC925_CPU_ERR_DEV,
		.init = cpc925_cpu_init,
		.exit = cpc925_cpu_exit,
		.check = cpc925_cpu_check,
	},
	{
		.ctl_name = CPC925_HT_LINK_DEV,
		.init = cpc925_htlink_init,
		.exit = cpc925_htlink_exit,
		.check = cpc925_htlink_check,
	},
	{ }
};

/*
 * Add CPU error detection and HyperTransport Link error detection
 * as common "edac_device"s. They have no corresponding device nodes
 * in the Open Firmware DTB, so we have to register platform devices
 * for them. They also share the memory controller's MMIO region.
 */
static void cpc925_add_edac_devices(void __iomem *vbase)
{
	struct cpc925_dev_info *dev_info;

	if (!vbase) {
		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
		return;
	}
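
	/*
	 * cpc925_devs[] is terminated by an empty entry whose .init
	 * callback is NULL.
	 */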
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		dev_info->vbase = vbase;
		dev_info->pdev = platform_device_register_simple(
					dev_info->ctl_name, 0, NULL, 0);
		if (IS_ERR(dev_info->pdev)) {
			cpc925_printk(KERN_ERR,
				"Can't register platform device for %s\n",
				dev_info->ctl_name);
			continue;
		}

		/*
		 * No need to allocate a private structure here;
		 * cpc925_devs[] is used instead.
		 */
		dev_info->edac_idx = edac_device_alloc_index();
		dev_info->edac_dev =
			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
		if (!dev_info->edac_dev) {
			cpc925_printk(KERN_ERR, "No memory for edac device\n");
			goto err1;
		}

		dev_info->edac_dev->pvt_info = dev_info;
		dev_info->edac_dev->dev = &dev_info->pdev->dev;
		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);

		if (edac_op_state == EDAC_OPSTATE_POLL)
			dev_info->edac_dev->edac_check = dev_info->check;

		if (dev_info->init)
			dev_info->init(dev_info);

		if (edac_device_add_device(dev_info->edac_dev) > 0) {
			cpc925_printk(KERN_ERR,
				"Unable to add edac device for %s\n",
				dev_info->ctl_name);
			goto err2;
		}

		edac_dbg(0, "Successfully added edac device for %s\n",
			 dev_info->ctl_name);

		continue;

err2:
		if (dev_info->exit)
			dev_info->exit(dev_info);
		edac_device_free_ctl_info(dev_info->edac_dev);
err1:
		platform_device_unregister(dev_info->pdev);
	}
}

/*
 * Delete the common "edac_device"s for CPU error detection
 * and HyperTransport Link error detection.
 */
static void cpc925_del_edac_devices(void)
{
	struct cpc925_dev_info *dev_info;

	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		if (dev_info->edac_dev) {
			edac_device_del_device(dev_info->edac_dev->dev);
			edac_device_free_ctl_info(dev_info->edac_dev);
			platform_device_unregister(dev_info->pdev);
		}

		if (dev_info->exit)
			dev_info->exit(dev_info);

		edac_dbg(0, "Successfully deleted edac device for %s\n",
			 dev_info->ctl_name);
	}
}

/* Convert the current background scrub rate into bytes/sec bandwidth */
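/*
 * Note: 0xFA67 (64103) is roughly 1s / 15.6us, so the formula below
 * appears to assume one CPC925_SCRUB_BLOCK_SIZE block is scrubbed
 * every "si" refresh intervals, yielding bytes scrubbed per second.
 */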
static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	struct cpc925_mc_pdata *pdata = mci->pvt_info;
	int bw;
	u32 mscr;
	u8 si;

	mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
	si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;

	edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);

	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
	    (si == 0)) {
		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
		bw = 0;
	} else
		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;

	return bw;
}

/* Return 0 for single channel; 1 for dual channel */
static int cpc925_mc_get_channels(void __iomem *vbase)
{
	int dual = 0;
	u32 mbcr;

	mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);

	/*
	 * Dual channel only when 128-bit wide physical bus
	 * and 128-bit configuration.
	 */
	if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
	    ((mbcr & MBCR_64BITBUS_MASK) == 0))
		dual = 1;

	edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");

	return dual;
}

static int cpc925_probe(struct platform_device *pdev)
{
	static int edac_mc_idx;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	void __iomem *vbase;
	struct cpc925_mc_pdata *pdata;
	struct resource *r;
	int res = 0, nr_channels;

	edac_dbg(0, "%s platform device found!\n", pdev->name);

	if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
		res = -ENOMEM;
		goto out;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		cpc925_printk(KERN_ERR, "Unable to get resource\n");
		res = -ENOENT;
		goto err1;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdev->name)) {
		cpc925_printk(KERN_ERR, "Unable to request mem region\n");
		res = -EBUSY;
		goto err1;
	}

	vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!vbase) {
		cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
		res = -ENOMEM;
		goto err2;
	}

	nr_channels = cpc925_mc_get_channels(vbase) + 1;
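
	/*
	 * Two-level layout: chip-select rows (up to CPC925_NR_CSROWS)
	 * on top, with one or two channels per csrow beneath.
	 */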
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = CPC925_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct cpc925_mc_pdata));
	if (!mci) {
		cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
		res = -ENOMEM;
		goto err2;
	}

	pdata = mci->pvt_info;
	pdata->vbase = vbase;
	pdata->edac_idx = edac_mc_idx++;
	pdata->name = pdev->name;

	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	mci->dev_name = dev_name(&pdev->dev);
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = CPC925_EDAC_MOD_STR;
	mci->ctl_name = pdev->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = cpc925_mc_check;

	mci->ctl_page_to_phys = NULL;
	mci->scrub_mode = SCRUB_SW_SRC;
	mci->set_sdram_scrub_rate = NULL;
	mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;

	cpc925_init_csrows(mci);

	/* Setup memory controller registers */
	cpc925_mc_init(mci);

	if (edac_mc_add_mc(mci) > 0) {
		cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
		goto err3;
	}

	cpc925_add_edac_devices(vbase);

	/* get this far and it's successful */
	edac_dbg(0, "success\n");

	res = 0;
	goto out;

err3:
	cpc925_mc_exit(mci);
	edac_mc_free(mci);
err2:
	devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
err1:
	devres_release_group(&pdev->dev, cpc925_probe);
out:
	return res;
}

static int cpc925_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	/*
	 * Delete common edac devices before edac mc, because
	 * the former share the MMIO of the latter.
	 */
	cpc925_del_edac_devices();
	cpc925_mc_exit(mci);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}

static struct platform_driver cpc925_edac_driver = {
	.probe = cpc925_probe,
	.remove = cpc925_remove,
	.driver = {
		.name = "cpc925_edac",
	}
};

static int __init cpc925_edac_init(void)
{
	int ret = 0;

	printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");

	/* Only support POLL mode so far */
	edac_op_state = EDAC_OPSTATE_POLL;

	ret = platform_driver_register(&cpc925_edac_driver);
	if (ret) {
		printk(KERN_WARNING "Failed to register %s\n",
			CPC925_EDAC_MOD_STR);
	}

	return ret;
}

static void __exit cpc925_edac_exit(void)
{
	platform_driver_unregister(&cpc925_edac_driver);
}

module_init(cpc925_edac_init);
module_exit(cpc925_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");