mv64x60_edac.c

/*
 * Marvell MV64x60 Memory Controller kernel module for PPC platforms
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/gfp.h>

#include "edac_module.h"
#include "mv64x60_edac.h"
static const char *mv64x60_ctl_name = "MV64x60";
static int edac_dev_idx;
static int edac_pci_idx;
static int edac_mc_idx;

/*********************** PCI err device **********************************/

#ifdef CONFIG_PCI
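/*
 * Check routine for the PCI error unit: read the latched error cause, dump
 * the captured address/attribute/command registers, clear the cause by
 * writing back its complement, and report the event to the EDAC PCI core as
 * a parity error (PE) or non-parity error (NPE) depending on the cause bits.
 */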
static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 cause;

	cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
	printk(KERN_ERR "Attribute: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
	printk(KERN_ERR "Command: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
	writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);

	if (cause & MV64X60_PCI_PE_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);

	if (!(cause & MV64X60_PCI_PE_MASK))
		edac_pci_handle_npe(pci, pci->ctl_name);
}
static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 val;

	val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!val)
		return IRQ_NONE;

	mv64x60_pci_check(pci);

	return IRQ_HANDLED;
}
/*
 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
 * errata FEr-#11 and FEr-##16 for the 64460, it should be 0 on that chip as
 * well.  IOW, don't set bit 0.
 */

/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
static int __init mv64x60_pci_fixup(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *pci_serr;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		return -ENOENT;
	}

	pci_serr = ioremap(r->start, resource_size(r));
	if (!pci_serr)
		return -ENOMEM;

	writel(readl(pci_serr) & ~0x1, pci_serr);
	iounmap(pci_serr);

	return 0;
}
static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci;
	struct mv64x60_pci_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;

	pdata->pci_hose = pdev->id;
	pdata->name = "mv64x60_pci_err";
	platform_set_drvdata(pdev, pci);
	pci->dev = &pdev->dev;
	pci->dev_name = dev_name(&pdev->dev);
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mv64x60_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&pdev->dev,
					r->start,
					resource_size(r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	res = mv64x60_pci_fixup(pdev);
	if (res < 0) {
		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
		goto err;
	}

	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
	writel(MV64X60_PCIx_ERR_MASK_VAL,
	       pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_pci_isr,
				       0,
				       "[EDAC] PCI err",
				       pci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 PCI ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}
		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_pci_del_device(&pdev->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
	return res;
}
static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_pci_del_device(&pdev->dev);

	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct platform_driver mv64x60_pci_err_driver = {
	.probe = mv64x60_pci_err_probe,
	.remove = mv64x60_pci_err_remove,
	.driver = {
		   .name = "mv64x60_pci_err",
	}
};
#endif	/* CONFIG_PCI */

/*********************** SRAM err device **********************************/
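/*
 * Check routine for the internal SRAM error unit: dump the latched
 * address/data/parity registers, clear the cause, and report the event via
 * edac_device_handle_ue(); the unit latches a parity register, so errors
 * are only detected here, not corrected in hardware.
 */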
static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in internal SRAM\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return IRQ_NONE;

	mv64x60_sram_check(edac_dev);

	return IRQ_HANDLED;
}
static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct mv64x60_sram_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "sram", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_sram_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "SRAM err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->sram_vbase = devm_ioremap(&pdev->dev,
					 r->start,
					 resource_size(r));
	if (!pdata->sram_vbase) {
		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
		       __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup SRAM err registers */
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_sram_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_sram_isr,
				       0,
				       "[EDAC] SRAM err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}
		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}
static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}

static struct platform_driver mv64x60_sram_err_driver = {
	.probe = mv64x60_sram_err_probe,
	.remove = mv64x60_sram_err_remove,
	.driver = {
		   .name = "mv64x60_sram_err",
	}
};

/*********************** CPU err device **********************************/
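/*
 * The CPU interface error unit spans two register windows: cpu_vbase[0]
 * maps the block holding the latched error address registers, while
 * cpu_vbase[1] maps the block with the error data, parity, cause and mask
 * registers (the two IORESOURCE_MEM resources requested in the probe
 * routine below).
 */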
static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return;

	printk(KERN_ERR "Error on CPU interface\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
		MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return IRQ_NONE;

	mv64x60_cpu_check(edac_dev);

	return IRQ_HANDLED;
}
static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct resource *r;
	struct mv64x60_cpu_pdata *pdata;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_cpu_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[0]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[1]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup CPU err registers */
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
	writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;
	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_cpu_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_cpu_isr,
				       0,
				       "[EDAC] CPU err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for MV64x60 "
			       "CPU ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}
		printk(KERN_INFO EDAC_MOD_STR
		       " acquired irq %d for CPU Err\n", pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}
static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct platform_driver mv64x60_cpu_err_driver = {
	.probe = mv64x60_cpu_err_probe,
	.remove = mv64x60_cpu_err_remove,
	.driver = {
		   .name = "mv64x60_cpu_err",
	}
};

/*********************** DRAM err device **********************************/
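/*
 * SDRAM ECC check: the error address register reads as zero when no error
 * is latched.  Bit 0 is clear for a single-bit (corrected) error and set
 * for a double-bit (uncorrectable) one; the syndrome is the XOR of the ECC
 * received from DRAM and the ECC recalculated by the controller.  Writing
 * zero back to the error address register re-arms error capture.
 */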
static void mv64x60_mc_check(struct mem_ctl_info *mci)
{
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;
	u32 err_addr;
	u32 sdram_ecc;
	u32 comp_ecc;
	u32 syndrome;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return;

	err_addr = reg & ~0x3;
	sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
	comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
	syndrome = sdram_ecc ^ comp_ecc;

	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
	if (!(reg & 0x1))
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	else	/* 2 bit error, UE */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");

	/* clear the error */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
}
static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return IRQ_NONE;

	/* writing 0's to the ECC err addr in check function clears irq */
	mv64x60_mc_check(mci);

	return IRQ_HANDLED;
}
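/*
 * Total DRAM size is taken from the device tree: the second cell of the
 * "memory" node's "reg" property, which assumes the one-cell-address/
 * one-cell-size layout typically used on these 32-bit platforms.
 */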
static void get_total_mem(struct mv64x60_mc_pdata *pdata)
{
	struct device_node *np = NULL;
	const unsigned int *reg;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);

	pdata->total_mem = reg[1];
}
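/*
 * Describe the single csrow/channel exposed by this controller: size from
 * the device tree, memory type (registered vs. unbuffered DDR) and device
 * width decoded from the SDRAM configuration register, and SECDED as the
 * ECC mode.
 */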
static void mv64x60_init_csrows(struct mem_ctl_info *mci,
				struct mv64x60_mc_pdata *pdata)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	u32 devtype;
	u32 ctl;

	get_total_mem(pdata);

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);

	csrow = mci->csrows[0];
	dimm = csrow->channels[0]->dimm;

	dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
	dimm->grain = 8;

	dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;

	devtype = (ctl >> 20) & 0x3;
	switch (devtype) {
	case 0x0:
		dimm->dtype = DEV_X32;
		break;
	case 0x2:		/* could be X8 too, but no way to tell */
		dimm->dtype = DEV_X16;
		break;
	case 0x3:
		dimm->dtype = DEV_X4;
		break;
	default:
		dimm->dtype = DEV_UNKNOWN;
		break;
	}

	dimm->edac_mode = EDAC_SECDED;
}
static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mv64x60_mc_pdata *pdata;
	struct resource *r;
	u32 ctl;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct mv64x60_mc_pdata));
	if (!mci) {
		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	pdata->name = "mv64x60_mc_err";
	mci->dev_name = dev_name(&pdev->dev);
	pdata->edac_idx = edac_mc_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "MC err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&pdev->dev,
				       r->start,
				       resource_size(r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
	if (!(ctl & MV64X60_SDRAM_ECC)) {
		/* Non-ECC RAM? */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = mv64x60_ctl_name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mv64x60_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	mv64x60_init_csrows(mci, pdata);

	/* setup MC registers */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
	ctl = (ctl & 0xff00ffff) | 0x10000;
	writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);

	res = edac_mc_add_mc(mci);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* acquire interrupt that reports errors */
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_mc_isr,
				       0,
				       "[EDAC] MC err",
				       mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
		       pdata->irq);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
	edac_mc_free(mci);
	return res;
}
static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver mv64x60_mc_err_driver = {
	.probe = mv64x60_mc_err_probe,
	.remove = mv64x60_mc_err_remove,
	.driver = {
		   .name = "mv64x60_mc_err",
	}
};
static struct platform_driver * const drivers[] = {
	&mv64x60_mc_err_driver,
	&mv64x60_cpu_err_driver,
	&mv64x60_sram_err_driver,
#ifdef CONFIG_PCI
	&mv64x60_pci_err_driver,
#endif
};
static int __init mv64x60_edac_init(void)
{
	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv64x60_edac_init);
static void __exit mv64x60_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv64x60_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Montavista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");