// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005 Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME "sata_sil24"
#define DRV_VERSION "1.1"

/*
 * Port request block (PRB) 32 bytes
 */
struct sil24_prb {
	__le16 ctrl;
	__le16 prot;
	__le32 rx_cnt;
	u8 fis[6 * 4];
};

/*
 * Scatter gather entry (SGE) 16 bytes
 */
struct sil24_sge {
	__le64 addr;
	__le32 cnt;
	__le32 flags;
};

enum {
	SIL24_HOST_BAR = 0,
	SIL24_PORT_BAR = 2,

	/* sil24 fetches in chunks of 64 bytes. The first block
	 * contains the PRB and two SGEs. Each subsequent block
	 * consists of four SGEs and is called an SGT. Calculate the
	 * number of SGTs that fit into one page.
	 */
	SIL24_PRB_SZ = sizeof(struct sil24_prb)
		       + 2 * sizeof(struct sil24_sge),
	SIL24_MAX_SGT = (PAGE_SIZE - SIL24_PRB_SZ)
			/ (4 * sizeof(struct sil24_sge)),

	/* This will give us one unused SGE for ATA. This extra SGE
	 * will be used to store the CDB for ATAPI devices.
	 */
	SIL24_MAX_SGE = 4 * SIL24_MAX_SGT + 1,

	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */

	/* 32 bit regs */
	HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
	HOST_CTRL = 0x40,
	HOST_IRQ_STAT = 0x44,
	HOST_PHY_CFG = 0x48,
	HOST_BIST_CTRL = 0x50,
	HOST_BIST_PTRN = 0x54,
	HOST_BIST_STAT = 0x58,
	HOST_MEM_BIST_STAT = 0x5c,
	HOST_FLASH_CMD = 0x70,
	/* 8 bit regs */
	HOST_FLASH_DATA = 0x74,
	HOST_TRANSITION_DETECT = 0x75,
	HOST_GPIO_CTRL = 0x76,
	HOST_I2C_ADDR = 0x78, /* 32 bit */
	HOST_I2C_DATA = 0x7c,
	HOST_I2C_XFER_CNT = 0x7e,
	HOST_I2C_CTRL = 0x7f,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN = (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN = (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY = (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */

	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE = 0x2000,

	PORT_LRAM = 0x0000, /* 31 LRAM slots and PMP regs */
	PORT_LRAM_SLOT_SZ = 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PMP = 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
	PORT_PMP_STATUS = 0x0000, /* port device status offset */
	PORT_PMP_QACTIVE = 0x0004, /* port device QActive offset */
	PORT_PMP_SIZE = 0x0008, /* 8 bytes per PMP */

	/* 32 bit regs */
	PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR = 0x101c,
	PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
	PORT_CMD_ERR = 0x1024, /* command error number */
	PORT_FIS_CFG = 0x1028,
	PORT_FIFO_THRES = 0x102c,

	/* 16 bit regs */
	PORT_DECODE_ERR_CNT = 0x1040,
	PORT_DECODE_ERR_THRESH = 0x1042,
	PORT_CRC_ERR_CNT = 0x1044,
	PORT_CRC_ERR_THRESH = 0x1046,
	PORT_HSHK_ERR_CNT = 0x1048,
	PORT_HSHK_ERR_THRESH = 0x104a,

	/* 32 bit regs */
	PORT_PHY_CFG = 0x1050,
	PORT_SLOT_STAT = 0x1800,
	PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_CONTEXT = 0x1e04,
	PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL = 0x1f00,
	PORT_SSTATUS = 0x1f04,
	PORT_SERROR = 0x1f08,
	PORT_SACTIVE = 0x1f0c,

	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST = (1 << 0), /* port reset */
	PORT_CS_DEV_RST = (1 << 1), /* device reset */
	PORT_CS_INIT = (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16 = (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_PMP_RESUME = (1 << 6), /* PMP resume */
	PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
	PORT_CS_PMP_EN = (1 << 13), /* port multiplier enable */
	PORT_CS_RDY = (1 << 31), /* port ready to accept commands */

	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR = (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS = (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG = (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B = (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC = (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE = (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY = (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ = PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
		       PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
		       PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT = 16,
	PORT_IRQ_MASKED_MASK = 0x7ff,
	PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),

	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT = 30,
	PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),

	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT = 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */

	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL = (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ = (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE = (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN = (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST = (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET = (1 << 0),
	PRB_PROT_TCQ = (1 << 1),
	PRB_PROT_NCQ = (1 << 2),
	PRB_PROT_READ = (1 << 3),
	PRB_PROT_WRITE = (1 << 4),
	PRB_PROT_TRANSPARENT = (1 << 5),

	/*
	 * Other constants
	 */
	SGE_TRM = (1 << 31), /* Last SGE in chain */
	SGE_LNK = (1 << 30), /* linked list
				Points to SGT, not SGE */
	SGE_DRD = (1 << 29), /* discard data read (/dev/null)
				data address ignored */

	SIL24_MAX_CMDS = 31,

	/* board id */
	BID_SIL3124 = 0,
	BID_SIL3132 = 1,
	BID_SIL3131 = 2,

	/* host flags */
	SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
			     ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
			     ATA_FLAG_AN | ATA_FLAG_PMP,
	SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS = 0xf,
};

struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[SIL24_MAX_SGE];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[SIL24_MAX_SGE];
};

union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};
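
/*
 * Command block layout, worked out for the common 4 KiB PAGE_SIZE case
 * (illustrative numbers, everything follows from the definitions above):
 * the PRB is 32 bytes and an SGE is 16 bytes, so SIL24_PRB_SZ = 64,
 * SIL24_MAX_SGT = (4096 - 64) / 64 = 63 and
 * SIL24_MAX_SGE = 4 * 63 + 1 = 253.  That makes the ATAPI block, which
 * carries the extra 16-byte CDB, exactly one page, which is what the
 * size check in sil24_init_one() enforces for the whole union.
 */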

static const struct sil24_cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} sil24_cerr_db[] = {
	[0] = { AC_ERR_DEV, 0,
		"device error" },
	[PORT_CERR_DEV] = { AC_ERR_DEV, 0,
		"device error via D2H FIS" },
	[PORT_CERR_SDB] = { AC_ERR_DEV, 0,
		"device error via SDB FIS" },
	[PORT_CERR_DATA] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
		"error in data FIS" },
	[PORT_CERR_SEND] = { AC_ERR_ATA_BUS, ATA_EH_RESET,
		"failed to transmit command FIS" },
	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
		"protocol mismatch" },
	[PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
		"data direction mismatch" },
	[PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
		"ran out of SGEs while writing" },
	[PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
		"ran out of SGEs while reading" },
	[PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
		"invalid data direction for ATAPI CDB" },
	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
		"SGT not on qword boundary" },
	[PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI target abort while fetching SGT" },
	[PORT_CERR_SGT_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI master abort while fetching SGT" },
	[PORT_CERR_SGT_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI parity error while fetching SGT" },
	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
		"PRB not on qword boundary" },
	[PORT_CERR_CMD_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI target abort while fetching PRB" },
	[PORT_CERR_CMD_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI master abort while fetching PRB" },
	[PORT_CERR_CMD_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI parity error while fetching PRB" },
	[PORT_CERR_XFR_UNDEF] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"undefined error while transferring data" },
	[PORT_CERR_XFR_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI target abort while transferring data" },
	[PORT_CERR_XFR_MSTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI master abort while transferring data" },
	[PORT_CERR_XFR_PCIPERR] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
		"PCI parity error while transferring data" },
	[PORT_CERR_SENDSERVICE] = { AC_ERR_HSM, ATA_EH_RESET,
		"FIS received while sending service FIS" },
};
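
/*
 * The table above is indexed directly by the PORT_CMD_ERR value read in
 * sil24_error_intr(); error numbers without a populated entry (or past
 * the end of the table) are reported as "unknown command error" and
 * escalated to a reset there.
 */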

/*
 * ap->private_data
 *
 * The preview driver always returned 0 for status. We emulate it
 * here from the previous interrupt.
 */
struct sil24_port_priv {
	union sil24_cmd_block *cmd_block; /* 31 cmd blocks */
	dma_addr_t cmd_block_dma; /* DMA base addr for them */
	int do_port_rst;
};

static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap);
#endif

static const struct pci_device_id sil24_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },

	{ } /* terminate list */
};

static struct pci_driver sil24_pci_driver = {
	.name = DRV_NAME,
	.id_table = sil24_pci_tbl,
	.probe = sil24_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend = ata_pci_device_suspend,
	.resume = sil24_pci_device_resume,
#endif
};

static struct scsi_host_template sil24_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = SIL24_MAX_CMDS,
	.sg_tablesize = SIL24_MAX_SGE,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
};

static struct ata_port_operations sil24_ops = {
	.inherits = &sata_pmp_port_ops,

	.qc_defer = sil24_qc_defer,
	.qc_prep = sil24_qc_prep,
	.qc_issue = sil24_qc_issue,
	.qc_fill_rtf = sil24_qc_fill_rtf,

	.freeze = sil24_freeze,
	.thaw = sil24_thaw,
	.softreset = sil24_softreset,
	.hardreset = sil24_hardreset,
	.pmp_softreset = sil24_softreset,
	.pmp_hardreset = sil24_pmp_hardreset,
	.error_handler = sil24_error_handler,
	.post_internal_cmd = sil24_post_internal_cmd,
	.dev_config = sil24_dev_config,

	.scr_read = sil24_scr_read,
	.scr_write = sil24_scr_write,
	.pmp_attach = sil24_pmp_attach,
	.pmp_detach = sil24_pmp_detach,

	.port_start = sil24_port_start,
#ifdef CONFIG_PM
	.port_resume = sil24_port_resume,
#endif
};

static bool sata_sil24_msi; /* enable MSI when set; off by default */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");

/*
 * Use bits 30-31 of port_flags to encode available port numbers.
 * Current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1)

static const struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
			 SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil24_ops,
	},
	/* sil_3132 */
	{
		.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA5,
		.port_ops = &sil24_ops,
	},
};
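
/*
 * The controller exposes 31 command slots (SIL24_MAX_CMDS).  libata's
 * internal command carries a tag outside that range, so sil24_tag()
 * folds it onto hardware slot 0.
 */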
static int sil24_tag(int tag)
{
	if (unlikely(ata_tag_internal(tag)))
		return 0;
	return tag;
}

static unsigned long sil24_port_offset(struct ata_port *ap)
{
	return ap->port_no * PORT_REGS_SIZE;
}

static void __iomem *sil24_port_base(struct ata_port *ap)
{
	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}

static void sil24_dev_config(struct ata_device *dev)
{
	void __iomem *port = sil24_port_base(dev->link->ap);

	if (dev->cdb_len == 16)
		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}

static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_prb __iomem *prb;
	u8 fis[6 * 4];

	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
	memcpy_fromio(fis, prb->fis, sizeof(fis));
	ata_tf_from_fis(fis, tf);
}

static int sil24_scr_map[] = {
	[SCR_CONTROL] = 0,
	[SCR_STATUS] = 1,
	[SCR_ERROR] = 2,
	[SCR_ACTIVE] = 3,
};
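
/*
 * The SCR registers sit at PORT_SCONTROL + index * 4 (SControl, SStatus,
 * SError and SActive at 0x1f00/0x1f04/0x1f08/0x1f0c), which is what the
 * small index map above encodes.
 */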
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static void sil24_config_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* configure IRQ WoC */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

	/* zero error counters. */
	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
	writew(0x8000, port + PORT_CRC_ERR_THRESH);
	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
	writew(0x0000, port + PORT_DECODE_ERR_CNT);
	writew(0x0000, port + PORT_CRC_ERR_CNT);
	writew(0x0000, port + PORT_HSHK_ERR_CNT);

	/* always use 64bit activation */
	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

	/* clear port multiplier enable and resume bits */
	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}
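
/*
 * sil24_config_port() establishes the baseline port configuration.  It
 * runs for every port from sil24_init_controller() and is run again by
 * sil24_hardreset() to restore the configuration after a PORT_RST.
 */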
static void sil24_config_pmp(struct ata_port *ap, int attached)
{
	void __iomem *port = sil24_port_base(ap);

	if (attached)
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}

static void sil24_clear_pmp(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	int i;

	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);

	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;

		writel(0, pmp_base + PORT_PMP_STATUS);
		writel(0, pmp_base + PORT_PMP_QACTIVE);
	}
}

static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	u32 tmp;

	/* clear PMP error status */
	if (sata_pmp_attached(ap))
		sil24_clear_pmp(ap);

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(ap, port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
		pp->do_port_rst = 1;
		ap->link.eh_context.i.action |= ATA_EH_RESET;
		return -EIO;
	}

	return 0;
}
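
/*
 * Port init handshake: writing PORT_CS_INIT starts the initialization
 * sequence, the first wait is for the INIT bit to clear again and the
 * second for PORT_CS_RDY to assert.  If the port never becomes ready, a
 * full PORT_RST is scheduled via pp->do_port_rst and EH is asked to
 * reset the link.
 */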
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
				 const struct ata_taskfile *tf,
				 int is_cmd, u32 ctrl,
				 unsigned long timeout_msec)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 irq_enabled, irq_mask, irq_stat;
	int rc;

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);

	/* temporarily plug completion and error interrupts */
	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
				     10, timeout_msec);

	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (irq_stat & PORT_IRQ_COMPLETE)
		rc = 0;
	else {
		/* force port into known state */
		sil24_init_port(ap);

		if (irq_stat & PORT_IRQ_ERROR)
			rc = -EIO;
		else
			rc = -EBUSY;
	}

	/* restore IRQ enabled */
	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);

	return rc;
}
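
/*
 * sil24_exec_polled_cmd() reuses command slot 0 and polls the raw
 * (unmasked) completion/error bits in PORT_IRQ_STAT while the
 * corresponding interrupt sources are plugged, so the softreset path can
 * use it without going through the normal interrupt handler.
 */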
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	unsigned long timeout_msec = 0;
	struct ata_taskfile tf;
	const char *reason;
	int rc;

	DPRINTK("ENTER\n");

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason = "port not ready";
		goto err;
	}

	/* do SRST */
	if (time_after(deadline, jiffies))
		timeout_msec = jiffies_to_msecs(deadline - jiffies);

	ata_tf_init(link->device, &tf); /* doesn't really matter */
	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
				   timeout_msec);
	if (rc == -EBUSY) {
		reason = "timeout";
		goto err;
	} else if (rc) {
		reason = "SRST command error";
		goto err;
	}

	sil24_read_tf(ap, 0, &tf);
	*class = ata_dev_classify(&tf);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 err:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return -EIO;
}

static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	int did_port_rst = 0;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

 retry:
	/* Sometimes, DEV_RST is not enough to recover the controller.
	 * This often happens after the PMP DMA CS errata has been hit.
	 */
	if (pp->do_port_rst) {
		ata_port_warn(ap,
			      "controller in dubious state, performing PORT_RST\n");

		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
		ata_msleep(ap, 10);
		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
				  10, 5000);

		/* restore port configuration */
		sil24_config_port(ap);
		sil24_config_pmp(ap, ap->nr_pmp_links);

		pp->do_port_rst = 0;
		did_port_rst = 1;
	}

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(link);

	tout_msec = 100;
	if (ata_link_online(link))
		tout_msec = 5000;

	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
				tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	if (tmp & PORT_CS_DEV_RST) {
		if (ata_link_offline(link))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear. Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here. Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	if (!did_port_rst) {
		pp->do_port_rst = 1;
		goto retry;
	}

	ata_link_err(link, "hardreset failed (%s)\n", reason);
	return -EIO;
}

static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
				 struct sil24_sge *sge)
{
	struct scatterlist *sg;
	struct sil24_sge *last_sge = NULL;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
		sge->flags = 0;

		last_sge = sge;
		sge++;
	}

	last_sge->flags = cpu_to_le32(SGE_TRM);
}
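
/*
 * sil24_fill_sg() is reached only from sil24_qc_prep() when
 * ATA_QCFLAG_DMAMAP is set, so qc->n_elem should be at least one and
 * last_sge non-NULL by the time SGE_TRM (end of chain) is stored.
 */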
static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	u8 prot = qc->tf.protocol;

	/*
	 * There is a bug in the chip:
	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
	 * If the host issues a read request for LRAM and SActive registers
	 * while active commands are available in the port, PRB/SGT data in
	 * the LRAM can become corrupted. This issue applies only when
	 * reading from, but not writing to, the LRAM.
	 *
	 * Therefore, reading LRAM when there is no particular error [and
	 * other commands may be outstanding] is prohibited.
	 *
	 * To avoid this bug there are two situations where a command must run
	 * exclusive of any other commands on the port:
	 *
	 * - ATAPI commands which check the sense data
	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
	 *   set.
	 */
	int is_excl = (ata_is_atapi(prot) ||
		       (qc->flags & ATA_QCFLAG_RESULT_TF));

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	} else if (unlikely(is_excl)) {
		ap->excl_link = link;
		if (ap->nr_active_links)
			return ATA_DEFER_PORT;
		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
	}

	return ata_std_qc_defer(qc);
}

static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];

	if (!ata_is_atapi(qc->tf.protocol)) {
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		if (ata_is_data(qc->tf.protocol)) {
			u16 prot = 0;

			ctrl = PRB_CTRL_PROTOCOL;
			if (ata_is_ncq(qc->tf.protocol))
				prot |= PRB_PROT_NCQ;
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				prot |= PRB_PROT_WRITE;
			else
				prot |= PRB_PROT_READ;
			prb->prot = cpu_to_le16(prot);
		}
	} else {
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		if (ata_is_data(qc->tf.protocol)) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);

	return AC_ERR_OK;
}
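
/*
 * Illustrative results of qc_prep: for an NCQ write the PRB ends up with
 * ctrl = PRB_CTRL_PROTOCOL and prot = PRB_PROT_NCQ | PRB_PROT_WRITE; for
 * an ATAPI read the CDB is copied into the zero-filled 16-byte cdb
 * buffer and ctrl becomes PRB_CTRL_PACKET_READ.  In every case the
 * command FIS itself is built by ata_tf_to_fis().
 */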
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	void __iomem *port = sil24_port_base(ap);
	unsigned int tag = sil24_tag(qc->hw_tag);
	dma_addr_t paddr;
	void __iomem *activate;

	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
	activate = port + PORT_CMD_ACTIVATE + tag * 8;

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, activate);
	writel((u64)paddr >> 32, activate + 4);

	return 0;
}
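
/*
 * Each slot has its own 64-bit activation register at
 * PORT_CMD_ACTIVATE + tag * 8; writing the bus address of that tag's
 * command block, low dword first, hands the slot to the controller.
 * 64-bit activation is in effect because sil24_config_port() clears
 * PORT_CS_32BIT_ACTV.
 */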
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
	return true;
}

static void sil24_pmp_attach(struct ata_port *ap)
{
	u32 *gscr = ap->link.device->gscr;

	sil24_config_pmp(ap, 1);
	sil24_init_port(ap);

	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
	    sata_pmp_gscr_devid(gscr) == 0x4140) {
		ata_port_info(ap,
			"disabling NCQ support due to sil24-mv4140 quirk\n");
		ap->flags &= ~ATA_FLAG_NCQ;
	}
}

static void sil24_pmp_detach(struct ata_port *ap)
{
	sil24_init_port(ap);
	sil24_config_pmp(ap, 0);

	ap->flags |= ATA_FLAG_NCQ;
}

static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline)
{
	int rc;

	rc = sil24_init_port(link->ap);
	if (rc) {
		ata_link_err(link, "hardreset failed (port not ready)\n");
		return rc;
	}

	return sata_std_hardreset(link, class, deadline);
}

static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}

static void sil24_thaw(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port + PORT_IRQ_STAT);
	writel(tmp, port + PORT_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}

static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc = NULL;
	struct ata_link *link;
	struct ata_eh_info *ehi;
	int abort = 0, freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	link = &ap->link;
	ehi = &link->eh_info;
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
		ata_ehi_push_desc(ehi, "SDB notify");
		sata_async_notification(ap);
	}

	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, "%s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		const struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		u32 context, cerr;
		int pmp;

		abort = 1;

		/* DMA Context Switch Failure in Port Multiplier Mode
		 * errata. If we have active commands to 3 or more
		 * devices, any error condition on active devices can
		 * corrupt DMA context switching.
		 */
		if (ap->nr_active_links >= 3) {
			ehi->err_mask |= AC_ERR_OTHER;
			ehi->action |= ATA_EH_RESET;
			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
			pp->do_port_rst = 1;
			freeze = 1;
		}

		/* find out the offending link and qc */
		if (sata_pmp_attached(ap)) {
			context = readl(port + PORT_CONTEXT);
			pmp = (context >> 5) & 0xf;

			if (pmp < ap->nr_pmp_links) {
				link = &ap->pmp_link[pmp];
				ehi = &link->eh_info;
				qc = ata_qc_from_tag(ap, link->active_tag);

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
						  irq_stat);
			} else {
				err_mask |= AC_ERR_HSM;
				action |= ATA_EH_RESET;
				freeze = 1;
			}
		} else
			qc = ata_qc_from_tag(ap, link->active_tag);

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			if (action & ATA_EH_RESET)
				freeze = 1;
			ata_ehi_push_desc(ehi, "%s", ci->desc);
		} else {
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_RESET;
			freeze = 1;
			ata_ehi_push_desc(ehi, "unknown command error %d",
					  cerr);
		}

		/* record error info */
		if (qc)
			qc->err_mask |= err_mask;
		else
			ehi->err_mask |= err_mask;

		ehi->action |= action;

		/* if PMP, resume */
		if (sata_pmp_attached(ap))
			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 slot_stat, qc_active;
	int rc;

	/* If PCIX_IRQ_WOC, there's an inherent race window between
	 * clearing IRQ pending status and reading PORT_SLOT_STAT
	 * which may cause spurious interrupts afterwards. This is
	 * unavoidable and much better than losing interrupts which
	 * happens if IRQ pending is cleared after reading
	 * PORT_SLOT_STAT.
	 */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	slot_stat = readl(port + PORT_SLOT_STAT);

	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active);
	if (rc > 0)
		return;
	if (rc < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	/* spurious interrupts are expected if PCIX_IRQ_WOC */
	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
		ata_port_info(ap,
			"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->link.active_tag, ap->link.sactive);
}
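
/*
 * As used above, the low bits of PORT_SLOT_STAT track which command
 * slots are still active, so with HOST_SSTAT_ATTN masked off the value
 * can be handed straight to ata_qc_complete_multiple() as the active
 * tag mask.
 */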
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	unsigned handled = 0;
	u32 status;
	int i;

	status = readl(host_base + HOST_IRQ_STAT);

	if (status == 0xffffffff) {
		dev_err(host->dev, "IRQ status == 0xffffffff, "
			"PCI fault or device removal?\n");
		goto out;
	}

	if (!(status & IRQ_STAT_4PORTS))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static void sil24_error_handler(struct ata_port *ap)
{
	struct sil24_port_priv *pp = ap->private_data;

	if (sil24_init_port(ap))
		ata_eh_freeze_port(ap);

	sata_pmp_error_handler(ap);

	pp->do_port_rst = 0;
}

static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
		ata_eh_freeze_port(ap);
}

static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct sil24_port_priv *pp;
	union sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
	dma_addr_t cb_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");

	return 0;
}
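
/*
 * The coherent buffer above holds SIL24_MAX_CMDS (31) page-sized command
 * blocks, one per hardware slot; both the CPU pointer and the DMA base
 * address are kept so that qc_prep can fill a block and qc_issue can
 * hand its bus address to PORT_CMD_ACTIVATE.
 */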
static void sil24_init_controller(struct ata_host *host)
{
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	u32 tmp;
	int i;

	/* GPIO off */
	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port = sil24_port_base(ap);

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			if (tmp & PORT_CS_PORT_RST)
				dev_err(host->dev,
					"failed to clear port RST\n");
		}

		/* configure port */
		sil24_config_port(ap);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}

static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info pi = sil24_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	void __iomem * const *iomap;
	struct ata_host *host;
	int rc;
	u32 tmp;

	/* cause build error if sil24_cmd_block is sized wrongly */
	BUILD_BUG_ON(sizeof(union sil24_cmd_block) != PAGE_SIZE);

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev,
				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
				DRV_NAME);
	if (rc)
		return rc;
	iomap = pcim_iomap_table(pdev);

	/* apply workaround for completion IRQ loss on PCI-X errata */
	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_info(&pdev->dev,
				 "Applying completion IRQ loss on PCI-X errata fix\n");
		else
			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* allocate and fill host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
				    SIL24_FLAG2NPORTS(ppi[0]->flags));
	if (!host)
		return -ENOMEM;
	host->iomap = iomap;

	/* configure and activate the device */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	/* Set max read request size to 4096. This slightly increases
	 * write throughput for pci-e variants.
	 */
	pcie_set_readrq(pdev, 4096);

	sil24_init_controller(host);

	if (sata_sil24_msi && !pci_enable_msi(pdev)) {
		dev_info(&pdev->dev, "Using MSI\n");
		pci_intx(pdev, 0);
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
				 &sil24_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int sil24_port_resume(struct ata_port *ap)
{
	sil24_config_pmp(ap, ap->nr_pmp_links);
	return 0;
}
#endif

module_pci_driver(sil24_pci_driver);

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);