al_mc_edac.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/edac.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "edac_module.h"

/* Register offsets */
#define AL_MC_ECC_CFG		0x70
#define AL_MC_ECC_CLEAR		0x7c
#define AL_MC_ECC_ERR_COUNT	0x80
#define AL_MC_ECC_CE_ADDR0	0x84
#define AL_MC_ECC_CE_ADDR1	0x88
#define AL_MC_ECC_UE_ADDR0	0xa4
#define AL_MC_ECC_UE_ADDR1	0xa8
#define AL_MC_ECC_CE_SYND0	0x8c
#define AL_MC_ECC_CE_SYND1	0x90
#define AL_MC_ECC_CE_SYND2	0x94
#define AL_MC_ECC_UE_SYND0	0xac
#define AL_MC_ECC_UE_SYND1	0xb0
#define AL_MC_ECC_UE_SYND2	0xb4

/* Register fields */
#define AL_MC_ECC_CFG_SCRUB_DISABLED	BIT(4)
#define AL_MC_ECC_CLEAR_UE_COUNT	BIT(3)
#define AL_MC_ECC_CLEAR_CE_COUNT	BIT(2)
#define AL_MC_ECC_CLEAR_UE_ERR		BIT(1)
#define AL_MC_ECC_CLEAR_CE_ERR		BIT(0)
#define AL_MC_ECC_ERR_COUNT_UE		GENMASK(31, 16)
#define AL_MC_ECC_ERR_COUNT_CE		GENMASK(15, 0)
#define AL_MC_ECC_CE_ADDR0_RANK		GENMASK(25, 24)
#define AL_MC_ECC_CE_ADDR0_ROW		GENMASK(17, 0)
#define AL_MC_ECC_CE_ADDR1_BG		GENMASK(25, 24)
#define AL_MC_ECC_CE_ADDR1_BANK		GENMASK(18, 16)
#define AL_MC_ECC_CE_ADDR1_COLUMN	GENMASK(11, 0)
#define AL_MC_ECC_UE_ADDR0_RANK		GENMASK(25, 24)
#define AL_MC_ECC_UE_ADDR0_ROW		GENMASK(17, 0)
#define AL_MC_ECC_UE_ADDR1_BG		GENMASK(25, 24)
#define AL_MC_ECC_UE_ADDR1_BANK		GENMASK(18, 16)
#define AL_MC_ECC_UE_ADDR1_COLUMN	GENMASK(11, 0)

#define DRV_NAME "al_mc_edac"
#define AL_MC_EDAC_MSG_MAX 256

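/*
 * Per-instance driver state, stored in mci->pvt_info.
 * @mmio_base: mapped memory-controller registers
 * @lock:      serializes calls into edac_mc_handle_error()
 * @irq_ce:    CE interrupt line; <= 0 means the CE path is polled
 * @irq_ue:    UE interrupt line; <= 0 means the UE path is polled
 */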
struct al_mc_edac {
	void __iomem *mmio_base;
	spinlock_t lock;
	int irq_ce;
	int irq_ue;
};

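/* Format a single human-readable error string from the decoded fields. */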
static void prepare_msg(char *message, size_t buffer_size,
			enum hw_event_mc_err_type type,
			u8 rank, u32 row, u8 bg, u8 bank, u16 column,
			u32 syn0, u32 syn1, u32 syn2)
{
	snprintf(message, buffer_size,
		 "%s rank=0x%x row=0x%x bg=0x%x bank=0x%x col=0x%x syn0: 0x%x syn1: 0x%x syn2: 0x%x",
		 type == HW_EVENT_ERR_UNCORRECTED ? "UE" : "CE",
		 rank, row, bg, bank, column, syn0, syn1, syn2);
}

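/*
 * Read the CE counter; if any correctable errors were latched, snapshot
 * the address/syndrome registers, clear the hardware state and report
 * the whole batch to the EDAC core in a single call.
 */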
static int handle_ce(struct mem_ctl_info *mci)
{
	u32 eccerrcnt, ecccaddr0, ecccaddr1, ecccsyn0, ecccsyn1, ecccsyn2, row;
	struct al_mc_edac *al_mc = mci->pvt_info;
	char msg[AL_MC_EDAC_MSG_MAX];
	u16 ce_count, column;
	unsigned long flags;
	u8 rank, bg, bank;

	eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
	ce_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_CE, eccerrcnt);
	if (!ce_count)
		return 0;

	ecccaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR0);
	ecccaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_ADDR1);
	ecccsyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND0);
	ecccsyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND1);
	ecccsyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_CE_SYND2);

	writel_relaxed(AL_MC_ECC_CLEAR_CE_COUNT | AL_MC_ECC_CLEAR_CE_ERR,
		       al_mc->mmio_base + AL_MC_ECC_CLEAR);

	dev_dbg(mci->pdev, "ecccaddr0=0x%08x ecccaddr1=0x%08x\n",
		ecccaddr0, ecccaddr1);

	rank = FIELD_GET(AL_MC_ECC_CE_ADDR0_RANK, ecccaddr0);
	row = FIELD_GET(AL_MC_ECC_CE_ADDR0_ROW, ecccaddr0);
	bg = FIELD_GET(AL_MC_ECC_CE_ADDR1_BG, ecccaddr1);
	bank = FIELD_GET(AL_MC_ECC_CE_ADDR1_BANK, ecccaddr1);
	column = FIELD_GET(AL_MC_ECC_CE_ADDR1_COLUMN, ecccaddr1);

	prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_CORRECTED,
		    rank, row, bg, bank, column,
		    ecccsyn0, ecccsyn1, ecccsyn2);

	spin_lock_irqsave(&al_mc->lock, flags);
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     ce_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
	spin_unlock_irqrestore(&al_mc->lock, flags);

	return ce_count;
}

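/* UE counterpart of handle_ce(), using the uncorrectable-error registers. */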
static int handle_ue(struct mem_ctl_info *mci)
{
	u32 eccerrcnt, eccuaddr0, eccuaddr1, eccusyn0, eccusyn1, eccusyn2, row;
	struct al_mc_edac *al_mc = mci->pvt_info;
	char msg[AL_MC_EDAC_MSG_MAX];
	u16 ue_count, column;
	unsigned long flags;
	u8 rank, bg, bank;

	eccerrcnt = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_ERR_COUNT);
	ue_count = FIELD_GET(AL_MC_ECC_ERR_COUNT_UE, eccerrcnt);
	if (!ue_count)
		return 0;

	eccuaddr0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR0);
	eccuaddr1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_ADDR1);
	eccusyn0 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND0);
	eccusyn1 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND1);
	eccusyn2 = readl_relaxed(al_mc->mmio_base + AL_MC_ECC_UE_SYND2);

	writel_relaxed(AL_MC_ECC_CLEAR_UE_COUNT | AL_MC_ECC_CLEAR_UE_ERR,
		       al_mc->mmio_base + AL_MC_ECC_CLEAR);

	dev_dbg(mci->pdev, "eccuaddr0=0x%08x eccuaddr1=0x%08x\n",
		eccuaddr0, eccuaddr1);

	rank = FIELD_GET(AL_MC_ECC_UE_ADDR0_RANK, eccuaddr0);
	row = FIELD_GET(AL_MC_ECC_UE_ADDR0_ROW, eccuaddr0);
	bg = FIELD_GET(AL_MC_ECC_UE_ADDR1_BG, eccuaddr1);
	bank = FIELD_GET(AL_MC_ECC_UE_ADDR1_BANK, eccuaddr1);
	column = FIELD_GET(AL_MC_ECC_UE_ADDR1_COLUMN, eccuaddr1);

	prepare_msg(msg, sizeof(msg), HW_EVENT_ERR_UNCORRECTED,
		    rank, row, bg, bank, column,
		    eccusyn0, eccusyn1, eccusyn2);

	spin_lock_irqsave(&al_mc->lock, flags);
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
			     ue_count, 0, 0, 0, 0, 0, -1, mci->ctl_name, msg);
	spin_unlock_irqrestore(&al_mc->lock, flags);

	return ue_count;
}

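/*
 * Periodic check used by the EDAC core in polling mode; only the error
 * types that lack a dedicated interrupt line are polled here.
 */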
static void al_mc_edac_check(struct mem_ctl_info *mci)
{
	struct al_mc_edac *al_mc = mci->pvt_info;

	if (al_mc->irq_ue <= 0)
		handle_ue(mci);

	if (al_mc->irq_ce <= 0)
		handle_ce(mci);
}

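/*
 * The IRQ lines are requested with IRQF_SHARED, so return IRQ_HANDLED
 * only when an error was actually pending and IRQ_NONE otherwise.
 */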
static irqreturn_t al_mc_edac_irq_handler_ue(int irq, void *info)
{
	struct platform_device *pdev = info;
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	if (handle_ue(mci))
		return IRQ_HANDLED;
	return IRQ_NONE;
}

static irqreturn_t al_mc_edac_irq_handler_ce(int irq, void *info)
{
	struct platform_device *pdev = info;
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	if (handle_ce(mci))
		return IRQ_HANDLED;
	return IRQ_NONE;
}

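/* Report whether hardware scrubbing is enabled in the controller config. */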
static enum scrub_type get_scrub_mode(void __iomem *mmio_base)
{
	u32 ecccfg0;

	ecccfg0 = readl(mmio_base + AL_MC_ECC_CFG);

	if (FIELD_GET(AL_MC_ECC_CFG_SCRUB_DISABLED, ecccfg0))
		return SCRUB_NONE;
	else
		return SCRUB_HW_SRC;
}

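/*
 * devm_add_action() callbacks run in reverse registration order on
 * unbind, so edac_mc_del_mc() is invoked before edac_mc_free().
 */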
static void devm_al_mc_edac_free(void *data)
{
	edac_mc_free(data);
}

static void devm_al_mc_edac_del(void *data)
{
	edac_mc_del_mc(data);
}

static int al_mc_edac_probe(struct platform_device *pdev)
{
	struct edac_mc_layer layers[1];
	struct mem_ctl_info *mci;
	struct al_mc_edac *al_mc;
	void __iomem *mmio_base;
	struct dimm_info *dimm;
	int ret;

	mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmio_base)) {
		dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
			PTR_ERR(mmio_base));
		return PTR_ERR(mmio_base);
	}

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct al_mc_edac));
	if (!mci)
		return -ENOMEM;

	ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
	if (ret) {
		edac_mc_free(mci);
		return ret;
	}

	platform_set_drvdata(pdev, mci);
	al_mc = mci->pvt_info;

	al_mc->mmio_base = mmio_base;

	al_mc->irq_ue = of_irq_get_byname(pdev->dev.of_node, "ue");
	if (al_mc->irq_ue <= 0)
		dev_dbg(&pdev->dev,
			"no IRQ defined for UE - falling back to polling\n");

	al_mc->irq_ce = of_irq_get_byname(pdev->dev.of_node, "ce");
	if (al_mc->irq_ce <= 0)
		dev_dbg(&pdev->dev,
			"no IRQ defined for CE - falling back to polling\n");

	/*
	 * In case both interrupts (ue/ce) are found, use interrupt mode.
	 * In case neither interrupt is found, use polling mode.
	 * In case only one interrupt is found, use interrupt mode for it but
	 * keep polling mode enabled for the other.
	 */
	if (al_mc->irq_ue <= 0 || al_mc->irq_ce <= 0) {
		edac_op_state = EDAC_OPSTATE_POLL;
		mci->edac_check = al_mc_edac_check;
	} else {
		edac_op_state = EDAC_OPSTATE_INT;
	}

	spin_lock_init(&al_mc->lock);

	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = DRV_NAME;
	mci->ctl_name = "al_mc";
	mci->pdev = &pdev->dev;
	mci->scrub_mode = get_scrub_mode(mmio_base);

	dimm = *mci->dimms;
	dimm->grain = 1;

	ret = edac_mc_add_mc(mci);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to add memory controller device (%d)\n",
			ret);
		return ret;
	}

	ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
	if (ret) {
		edac_mc_del_mc(&pdev->dev);
		return ret;
	}

	if (al_mc->irq_ue > 0) {
		ret = devm_request_irq(&pdev->dev,
				       al_mc->irq_ue,
				       al_mc_edac_irq_handler_ue,
				       IRQF_SHARED,
				       pdev->name,
				       pdev);
		if (ret != 0) {
			dev_err(&pdev->dev,
				"failed to request UE IRQ %d (%d)\n",
				al_mc->irq_ue, ret);
			return ret;
		}
	}

	if (al_mc->irq_ce > 0) {
		ret = devm_request_irq(&pdev->dev,
				       al_mc->irq_ce,
				       al_mc_edac_irq_handler_ce,
				       IRQF_SHARED,
				       pdev->name,
				       pdev);
		if (ret != 0) {
			dev_err(&pdev->dev,
				"failed to request CE IRQ %d (%d)\n",
				al_mc->irq_ce, ret);
			return ret;
		}
	}

	return 0;
}

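/*
 * Example device-tree node (a sketch: the reg and interrupt values below
 * are made up for illustration; only the compatible string and the
 * "ue"/"ce" interrupt names are taken from this driver, and both
 * interrupts are optional since each one falls back to polling):
 *
 *	memory-controller@f0080000 {
 *		compatible = "amazon,al-mc-edac";
 *		reg = <0x0 0xf0080000 0x0 0x10000>;
 *		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-names = "ue", "ce";
 *	};
 */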
static const struct of_device_id al_mc_edac_of_match[] = {
	{ .compatible = "amazon,al-mc-edac", },
	{},
};

MODULE_DEVICE_TABLE(of, al_mc_edac_of_match);

static struct platform_driver al_mc_edac_driver = {
	.probe = al_mc_edac_probe,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = al_mc_edac_of_match,
	},
};

module_platform_driver(al_mc_edac_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Talel Shenhar");
MODULE_DESCRIPTION("Amazon's Annapurna Lab's Memory Controller EDAC Driver");