// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
 *
 * Copyright (c) 2010 by:
 *	 Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 *
 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
 *
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 * the driver covers only memory errors.
 *
 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_module.h"
/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION	" Ver: 1.0.0"

#define EDAC_MOD_STR	"i7300_edac"

#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)
/***********************************************
 * i7300 Limit constants Structs and static vars
 ***********************************************/

/*
 * Memory topology is organized as:
 *	Branch 0 - 2 channels: channels 0 and 1 (FBD0, PCI dev 21.0)
 *	Branch 1 - 2 channels: channels 2 and 3 (FBD1, PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTs here)
 * Slots should generally be filled in pairs:
 *	Except in Single Channel mode of operation,
 *		where only slot 0/channel 0 is filled,
 *	in normal operation mode the two channels on a branch should be
 *		filled together for the same SLOT#.
 * When in mirrored mode, Branch 1 replicates the memory of Branch 0, so the
 * four channels on both branches should be filled.
 */
/* Limits for i7300 */
#define MAX_SLOTS		8
#define MAX_BRANCHES		2
#define MAX_CH_PER_BRANCH	2
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3

#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

#define to_csrow(slot, ch, branch) \
		(to_channel(ch, branch) | ((slot) << 2))
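
/*
 * Layout note (editor's illustration, not from the datasheet): the csrow
 * index packs branch, channel and slot as slot[5:2] | branch[1] | ch[0].
 * For example, slot 3 on channel 1 of branch 1 maps to
 * to_channel(1, 1) = 3 and to_csrow(3, 1, 1) = (3 << 2) | 3 = 15.
 */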
/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};

struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0 and 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u32 mc_settings;			/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];			/* Memory Interleave Reg */

	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;
/***************************************************
 * i7300 Register definitions for memory enumeration
 ***************************************************/

/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

/* OFFSETS for Function 0 */
#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH			0x56 /* Max Channel Number */
#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */

/* OFFSETS for Function 1 */
#define MC_SETTINGS		0x40
#define IS_MIRRORED(mc)			((mc) & (1 << 16))
#define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
#define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
#define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))

#define MC_SETTINGS_A		0x58
#define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))

#define TOLM			0x6C

#define MIR0			0x80
#define MIR1			0x84
#define MIR2			0x88

/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same purpose.
 * Each memory slot may have up to 2 AMB interfaces, one for the incoming and
 * another for the outgoing interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
 * The datasheet is also not clear about how to map each AMBPRESENT register
 * to one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
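
/*
 * Worked example (editor's illustration with a hypothetical register value):
 * an MTR reading of 0x0159 has bit 8 set (DIMM present), bit 6 set (x8 DRAM
 * width), bit 4 set (double rank), a rows field of 2 (15 row address bits)
 * and a columns field of 1 (11 column address bits).
 */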
/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */
#define FERR_FAT_FBD	0x98
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};

#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK	((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD	0xa0
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};

#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK	((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
				 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
				 (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
				 (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
				 (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
				 (1 << 1)  | (1 << 0))

#define EMASK_FBD	0xa8
#define EMASK_FBD_ERR_MASK	((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
				 (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
				 (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
				 (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
				 (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
				 (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
				 (1 << 1)  | (1 << 0))
/*
 * Device 16.2: Global Error Registers
 */
#define FERR_GLOBAL_HI	0x48
static const char *ferr_global_hi_name[] = {
	[3] = "FSB 3 Fatal Error",
	[2] = "FSB 2 Fatal Error",
	[1] = "FSB 1 Fatal Error",
	[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno)	1

#define FERR_GLOBAL_LO	0x40
static const char *ferr_global_lo_name[] = {
	[31] = "Internal MCH Fatal Error",
	[30] = "Intel QuickData Technology Device Fatal Error",
	[29] = "FSB1 Fatal Error",
	[28] = "FSB0 Fatal Error",
	[27] = "FBD Channel 3 Fatal Error",
	[26] = "FBD Channel 2 Fatal Error",
	[25] = "FBD Channel 1 Fatal Error",
	[24] = "FBD Channel 0 Fatal Error",
	[23] = "PCI Express Device 7 Fatal Error",
	[22] = "PCI Express Device 6 Fatal Error",
	[21] = "PCI Express Device 5 Fatal Error",
	[20] = "PCI Express Device 4 Fatal Error",
	[19] = "PCI Express Device 3 Fatal Error",
	[18] = "PCI Express Device 2 Fatal Error",
	[17] = "PCI Express Device 1 Fatal Error",
	[16] = "ESI Fatal Error",
	[15] = "Internal MCH Non-Fatal Error",
	[14] = "Intel QuickData Technology Device Non Fatal Error",
	[13] = "FSB1 Non-Fatal Error",
	[12] = "FSB 0 Non-Fatal Error",
	[11] = "FBD Channel 3 Non-Fatal Error",
	[10] = "FBD Channel 2 Non-Fatal Error",
	[9]  = "FBD Channel 1 Non-Fatal Error",
	[8]  = "FBD Channel 0 Non-Fatal Error",
	[7]  = "PCI Express Device 7 Non-Fatal Error",
	[6]  = "PCI Express Device 6 Non-Fatal Error",
	[5]  = "PCI Express Device 5 Non-Fatal Error",
	[4]  = "PCI Express Device 4 Non-Fatal Error",
	[3]  = "PCI Express Device 3 Non-Fatal Error",
	[2]  = "PCI Express Device 2 Non-Fatal Error",
	[1]  = "PCI Express Device 1 Non-Fatal Error",
	[0]  = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)

#define NRECMEMA	0xbe
#define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
#define NRECMEMA_RANK(v)	(((v) >> 8) & 15)

#define NRECMEMB	0xc0
#define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
#define NRECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define NRECMEMB_RAS(v)		((v) & 0xffff)

#define REDMEMA		0xdc

#define REDMEMB		0x7c

#define RECMEMA		0xe0
#define RECMEMA_BANK(v)		(((v) >> 12) & 7)
#define RECMEMA_RANK(v)		(((v) >> 8) & 15)

#define RECMEMB		0xe4
#define RECMEMB_IS_WR(v)	((v) & (1 << 31))
#define RECMEMB_CAS(v)		(((v) >> 16) & 0x1fff)
#define RECMEMB_RAS(v)		((v) & 0xffff)
/********************************************
 * i7300 Functions related to error detection
 ********************************************/

/**
 * get_err_from_table() - Gets the error message from a table
 * @table:	table name (array of char *)
 * @size:	number of elements in the table
 * @pos:	position of the element to be returned
 *
 * This is a small routine that gets the pos-th element of a table. If the
 * element doesn't exist (or it is empty), it returns "Reserved".
 * Instead of calling it directly, it is better to use the
 * GET_ERR_FROM_TABLE() macro, which automatically checks the table size
 * via the ARRAY_SIZE() macro.
 */
static const char *get_err_from_table(const char *table[], int size, int pos)
{
	if (unlikely(pos >= size))
		return "Reserved";

	if (unlikely(!table[pos]))
		return "Reserved";

	return table[pos];
}

#define GET_ERR_FROM_TABLE(table, pos) \
	get_err_from_table(table, ARRAY_SIZE(table), pos)
/**
 * i7300_process_error_global() - Retrieve the hardware error information from
 *				  the hardware global error registers and
 *				  send it to dmesg
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}
/**
 * i7300_process_fbd_error() - Retrieve the hardware error information from
 *			       the FBD error registers and send it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;
	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clean the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s)",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &error_reg);
	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
		errors = error_reg & FERR_NF_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_nf_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMA, &syndrome);

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     RECMEMA, &val16);
		bank = RECMEMA_BANK(val16);
		rank = RECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      RECMEMB, &value);
		is_wr = RECMEMB_IS_WR(value);
		cas = RECMEMB_CAS(value);
		ras = RECMEMB_RAS(value);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				      REDMEMB, &value);
		channel = (branch << 1);

		/* Second channel ? */
		channel += !!(value & BIT(17));

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				       FERR_NF_FBD, error_reg);

		/* Form out message */
		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s)",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
				     syndrome,
				     branch >> 1, channel % 2, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}
}
/**
 * i7300_check_error() - Calls the error checking subroutines
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_check_error(struct mem_ctl_info *mci)
{
	i7300_process_error_global(mci);
	i7300_process_fbd_error(mci);
}
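
/*
 * Editor's note: with edac_op_state left at its default polling mode, the
 * EDAC core is expected to invoke this ->edac_check callback periodically,
 * so both register banks above are scanned without relying on NMI delivery.
 */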
/**
 * i7300_clear_error() - Clears the error registers
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_clear_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 value;
	/*
	 * All error values are RWC - we need to read and write 1 to the
	 * bit that we want to clean up
	 */

	/* Clear global error registers */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_HI, value);

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			       FERR_GLOBAL_LO, value);

	/* Clear FBD error registers */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_FAT_FBD, value);

	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       FERR_NF_FBD, value);
}
/**
 * i7300_enable_error_reporting() - Enable the memory reporting logic at the
 *				    hardware
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 fbd_error_mask;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      EMASK_FBD, &fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);

	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       EMASK_FBD, fbd_error_mask);
}
/************************************************
 * i7300 Functions related to memory enumeration
 ************************************************/
/**
 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 * @pvt: pointer to the private data struct used by i7300 driver
 * @slot: DIMM slot (0 to 7)
 * @ch: Channel number within the branch (0 or 1)
 * @branch: Branch number (0 or 1)
 * @dinfo: Pointer to DIMM info where dimm size is stored
 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 */
static int decode_mtr(struct i7300_pvt *pvt,
		      int slot, int ch, int branch,
		      struct i7300_dimm_info *dinfo,
		      struct dimm_info *dimm)
{
	int mtr, ans, addrBits, channel;

	channel = to_channel(ch, branch);

	mtr = pvt->mtr[slot][branch];
	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;

	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
		 slot, channel, ans ? "" : "NOT ");

	/* Determine if there is a DIMM present in this DIMM slot */
	if (!ans)
		return 0;

	/* Start with the number of bits for a Bank
	 * on the DRAM */
	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
	/* Add the number of ROW bits */
	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
	/* add the number of COLUMN bits */
	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
	/* add the number of RANK bits */
	addrBits += MTR_DIMM_RANKS(mtr);

	addrBits += 6;	/* add 64 bits per DIMM */
	addrBits -= 20;	/* divide by 2^^20 */
	addrBits -= 3;	/* 8 bits per bytes */

	dinfo->megabytes = 1 << addrBits;

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);

	/*
	 * The type of error detection actually depends on the
	 * mode of operation. When it is just one single memory chip, at
	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
	 * In normal or mirrored mode, it uses Lockstep mode,
	 * with the possibility of using an extended algorithm for x8 memories.
	 * See datasheet Sections 7.3.6 to 7.3.8
	 */
	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		dimm->edac_mode = EDAC_SECDED;
		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
	} else {
		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
		if (MTR_DRAM_WIDTH(mtr) == 8)
			dimm->edac_mode = EDAC_S8ECD8ED;
		else
			dimm->edac_mode = EDAC_S4ECD4ED;
	}

	/* ask what device type on this row */
	if (MTR_DRAM_WIDTH(mtr) == 8) {
		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
			 "enhanced" : "normal");
		dimm->dtype = DEV_X8;
	} else
		dimm->dtype = DEV_X4;

	return mtr;
}
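
/*
 * Size arithmetic illustration (editor's note, using the hypothetical MTR
 * value 0x0159 mentioned above): 2 bank bits + 15 row bits + 11 column bits +
 * 1 rank bit + 6 (64 data bits per access) - 20 (bytes to MiB) - 3 (bits to
 * bytes) = 12, so dinfo->megabytes would be 1 << 12 = 4096 MB.
 */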
/**
 * print_dimm_size() - Prints dump of the memory organization
 * @pvt: pointer to the private data struct used by i7300 driver
 *
 * Useful for debug. If debug is disabled, this routine does nothing
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	n = snprintf(p, space, " ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
			p += n;
			space -= n;
		}
		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}
/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		max_branch = 1;
		max_channel = 1;
	} else {
		max_branch = MAX_BRANCHES;
		max_channel = MAX_CH_PER_BRANCH;
	}

	/* Get the AMB present registers for the four channels */
	for (branch = 0; branch < max_branch; branch++) {
		/* Read and dump this branch's AMBPRESENT registers */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

		if (max_channel == 1)
			continue;

		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				     &pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs by each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];

		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					     where,
					     &pvt->mtr[slot][branch]);
			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = edac_get_dimm(mci, branch, ch, slot);
				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMS on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				rc = 0;
			}
		}
	}

	return rc;
}
/**
 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 * @mir_no: number of the MIR register to decode
 * @mir: array with the MIR data cached on the driver
 */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
	if (mir[mir_no] & 3)
		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
			 mir_no,
			 (mir[mir_no] >> 4) & 0xfff,
			 (mir[mir_no] & 1) ? "B0" : "",
			 (mir[mir_no] & 2) ? "B1" : "");
}
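
/*
 * Editor's illustration with a hypothetical value: a MIR reading of 0x0203
 * has both low bits set (branches B0 and B1 participate) and a limit field
 * of (0x0203 >> 4) & 0xfff = 0x20.
 */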
/**
 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 * @mci: struct mem_ctl_info pointer
 *
 * Data read is cached internally for later use when needed
 */
static int i7300_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 actual_tolm;
	int i, rc;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
			      (u32 *) &pvt->ambase);

	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
		 pvt->tolm, pvt->tolm);

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);

	/* Get memory controller settings */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
			      &pvt->mc_settings);
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
			      &pvt->mc_settings_a);

	if (IS_SINGLE_MODE(pvt->mc_settings_a))
		edac_dbg(0, "Memory controller operating on single mode\n");
	else
		edac_dbg(0, "Memory controller operating on %smirrored mode\n",
			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");

	edac_dbg(0, "Error detection is %s\n",
		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
	edac_dbg(0, "Retry is %s\n",
		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");

	/* Get Memory Interleave Range registers */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
			     &pvt->mir[0]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
			     &pvt->mir[1]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
			     &pvt->mir[2]);

	/* Decode the MIR regs */
	for (i = 0; i < MAX_MIR; i++)
		decode_mir(i, pvt->mir);

	rc = i7300_init_csrows(mci);
	if (rc < 0)
		return rc;

	/* Go and determine the size of each DIMM and place in an
	 * orderly matrix */
	print_dimm_size(pvt);

	return 0;
}
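
/*
 * TOLM arithmetic note (editor's illustration): each TOLM unit is a 256 MB
 * region, so a hypothetical TOLM of 8 corresponds to 8 << 28 = 0x80000000,
 * i.e. 2 GB of low memory; actual_tolm above would print as 2.000 GB.
 */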
/*************************************************
 * i7300 Functions related to device probe/release
 *************************************************/

/**
 * i7300_put_devices() - Release the PCI devices
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_put_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	int branch;

	pvt = mci->pvt_info;

	/* Decrement usage count for devices */
	for (branch = 0; branch < MAX_BRANCHES; branch++)
		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
}
/**
 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 *			 device/functions we want to reference for this driver
 * @mci: struct mem_ctl_info pointer
 *
 * Access and prepare the several devices for usage:
 * I7300 devices used by this driver:
 *    Device 16, functions 0, 1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 *    Device 21, function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 *    Device 22, function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 */
static int i7300_get_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
				      pdev))) {
		/* Store device 16 funcs 1 and 2 */
		switch (PCI_FUNC(pdev->devfn)) {
		case 1:
			if (!pvt->pci_dev_16_1_fsb_addr_map)
				pvt->pci_dev_16_1_fsb_addr_map =
							pci_dev_get(pdev);
			break;
		case 2:
			if (!pvt->pci_dev_16_2_fsb_err_regs)
				pvt->pci_dev_16_2_fsb_err_regs =
							pci_dev_get(pdev);
			break;
		}
	}

	if (!pvt->pci_dev_16_1_fsb_addr_map ||
	    !pvt->pci_dev_16_2_fsb_err_regs) {
		/* At least one device was not found */
		i7300_printk(KERN_ERR,
			     "'system address, Process Bus' device not found: "
			     "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
		goto error;
	}

	edac_dbg(1, "System Address, processor bus - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
		 pvt->pci_dev_16_0_fsb_ctlr->device);
	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
		 pvt->pci_dev_16_1_fsb_addr_map->device);
	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
		 pvt->pci_dev_16_2_fsb_err_regs->device);

	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 0' device not found: "
			     "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
		goto error;
	}

	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
		i7300_printk(KERN_ERR,
			     "MC: 'BRANCH 1' device not found: "
			     "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			     PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
		goto error;
	}

	return 0;

error:
	i7300_put_devices(mci);
	return -ENODEV;
}
/**
 * i7300_init_one() - Probe for one instance of the device
 * @pdev: struct pci_dev pointer
 * @id: struct pci_device_id pointer - currently unused
 */
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[3];
	struct i7300_pvt *pvt;
	int rc;

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc == -EIO)
		return rc;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We are only looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MAX_CH_PER_BRANCH;
	layers[1].is_virt_csrow = true;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MAX_SLOTS;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private */

	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pvt->tmp_prt_buffer) {
		edac_mc_free(mci);
		return -ENOMEM;
	}

	/* 'get' the pci devices we want to reserve for our use */
	if (i7300_get_devices(mci))
		goto fail0;

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7300_edac.c";
	mci->ctl_name = i7300_devs[0].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7300_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i7300_get_mc_regs(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i7300_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i7300_clear_error(mci);

	/* allocating generic PCI control info */
	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i7300_pci) {
		printk(KERN_WARNING
		       "%s(): Unable to create PCI control\n",
		       __func__);
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n",
		       __func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:
	i7300_put_devices(mci);

fail0:
	kfree(pvt->tmp_prt_buffer);
	edac_mc_free(mci);
	return -ENODEV;
}
/**
 * i7300_remove_one() - Remove the driver
 * @pdev: struct pci_dev pointer
 */
static void i7300_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	char *tmp;

	edac_dbg(0, "\n");

	if (i7300_pci)
		edac_pci_release_generic_ctl(i7300_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;

	/* retrieve references to resources, and free those resources */
	i7300_put_devices(mci);

	kfree(tmp);

	edac_mc_free(mci);
}
/*
 * pci_device_id: table of the devices this driver looks for
 *
 * Has only the 8086:360c PCI ID
 */
static const struct pci_device_id i7300_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);

/*
 * i7300_driver: pci_driver structure for this module
 */
static struct pci_driver i7300_driver = {
	.name = "i7300_edac",
	.probe = i7300_init_one,
	.remove = i7300_remove_one,
	.id_table = i7300_pci_tbl,
};
/**
 * i7300_init() - Registers the driver
 */
static int __init i7300_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i7300_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}
/**
 * i7300_exit() - Unregisters the driver
 */
static void __exit i7300_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7300_driver);
}
module_init(i7300_init);
module_exit(i7300_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
		   I7300_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll, 1=NMI");