// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *   Copyright 2006 Applied Micro Circuits Corporation
 *   COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 */
#ifdef CONFIG_SATA_DWC_DEBUG
#define DEBUG
#endif

#ifdef CONFIG_SATA_DWC_VDEBUG
#define VERBOSE_DEBUG
#define DEBUG_NCQ
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"
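/*
 * MMIO helpers. Note the (address, value) argument order, which is the
 * reverse of writel(). The _relaxed variants do not include the memory
 * barriers of plain readl()/writel().
 */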
#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#ifndef NO_IRQ
#define NO_IRQ		0
#endif

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};
/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};
enum {
	SCR_SCONTROL_DET_ENABLE	= 0x00000001,
	SCR_SSTATUS_DET_PRESENT	= 0x00000001,
	SCR_SERROR_DIAG_X	= 0x04000000,
	/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT	= 0x00000001,
	SATA_DWC_INTPR_NEWFP	= 0x00000002,
	SATA_DWC_INTPR_PMABRT	= 0x00000004,
	SATA_DWC_INTPR_ERR	= 0x00000008,
	SATA_DWC_INTPR_NEWBIST	= 0x00000010,
	SATA_DWC_INTPR_IPF	= 0x10000000,
	SATA_DWC_INTMR_DMATM	= 0x00000001,
	SATA_DWC_INTMR_NEWFPM	= 0x00000002,
	SATA_DWC_INTMR_PMABRTM	= 0x00000004,
	SATA_DWC_INTMR_ERRM	= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM	= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN	= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	= 0x00000002,
	SATA_DWC_LLCR_RPDEN	= 0x00000004,
	/* These are all the error bits; zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
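/*
 * The DBTSR burst sizes are programmed in 32-bit words (hence size/4):
 * the write burst occupies the low half-word, the read burst the high
 * half-word.
 */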
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};
/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};
/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
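/* Per-tag states tracked in cmd_issued[] and dma_pending[] above */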
enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};
/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
static void sata_dwc_port_stop(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};
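/*
 * dmaengine channel filter: accept only channels that belong to the DMA
 * controller recorded in sata_dwc_dma_dws, and hand the slave parameters
 * to the dw_dmac driver via chan->private (the old-style binding).
 */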
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	dma_cap_mask_t mask;

	dws->dma_dev = hsdev->dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(hsdev->dev, "%s: dma channel unavailable\n",
			__func__);
		return -EAGAIN;
	}

	return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;

	hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = &pdev->dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (hsdev->dma->irq == NO_IRQ) {
		dev_err(&pdev->dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif
static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}

static const char *get_dma_dir_descript(int dma_dir)
{
	switch ((enum dma_data_direction)dma_dir) {
	case DMA_BIDIRECTIONAL:
		return "bidirectional";
	case DMA_TO_DEVICE:
		return "to device";
	case DMA_FROM_DEVICE:
		return "from device";
	default:
		return "none";
	}
}

static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
{
	dev_vdbg(ap->dev,
		 "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
		 tf->command, get_prot_descript(tf->protocol), tf->flags,
		 tf->device);
	dev_vdbg(ap->dev,
		 "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
		 tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
	dev_vdbg(ap->dev,
		 "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
		 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
		 tf->hob_lbah);
}
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap, 1);

	spin_unlock_irqrestore(&host->lock, flags);
}
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;
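	/*
	 * Point the device side of the channel at the controller's DMA
	 * FIFO (hsdev->dmadr); whether that is the source or the
	 * destination depends on the transfer direction.
	 */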
	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
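	/*
	 * Writing the current contents back acknowledges what is pending
	 * (the register is assumed write-one-to-clear), so this clears
	 * every pending bit, not just @bit.
	 */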
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}
/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance, struct pt_regs *regs
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
			    SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	num_processed = 0;
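	/*
	 * Walk tag_mask bit by bit: each set bit is an issued command
	 * whose SActive bit has cleared, i.e. a completed NCQ command.
	 */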
	while (tag_mask) {
		num_processed++;
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc, 1);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
			    SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap, 1);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing -- we read status as part of processing a
	 * completed command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

#ifdef DEBUG_NCQ
	if (tag > 0) {
		dev_info(ap->dev,
			 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
			 __func__, qc->hw_tag, qc->tf.command,
			 get_dma_dir_descript(qc->dma_dir),
			 get_prot_descript(qc->tf.protocol),
			 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
	}
#endif

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc, check_status);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc, check_status);
	}
}
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
				u32 check_status)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;
	dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);
	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
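/*
 * The DWC taskfile registers sit at consecutive 32-bit offsets (4-byte
 * stride) from the command block base, unlike the byte-spaced legacy IDE
 * layout, so each libata ioaddr gets its own word.
 */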
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(dev->of_node, "dmas", NULL))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}
static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}
/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_sff_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
		ata_get_cmd_descript(tf->command), tag);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (ata_is_ncq(qc->tf.protocol)) {
		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
			__func__, qc->ap->link.sactive, tag);
	} else {
		tag = 0;
	}
	sata_dwc_bmdma_setup_by_tag(qc, tag);
}
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	dev_dbg(ap->dev,
		"%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
		__func__, qc, tag, qc->tf.command,
		get_dma_dir_descript(qc->dma_dir), start_dma);
	sata_dwc_tf_dump(ap, &qc->tf);

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (ata_is_ncq(qc->tf.protocol)) {
		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
			__func__, qc->ap->link.sactive, tag);
	} else {
		tag = 0;
	}
	dev_dbg(qc->ap->dev, "%s\n", __func__);
	sata_dwc_bmdma_start_by_tag(qc, tag);
}
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

#ifdef DEBUG_NCQ
	if (qc->hw_tag > 0 || ap->link.sactive > 1)
		dev_info(ap->dev,
			 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
			 __func__, ap->print_id, qc->tf.command,
			 ata_get_cmd_descript(qc->tf.command),
			 qc->hw_tag, get_prot_descript(qc->tf.protocol),
			 ap->link.active_tag, ap->link.sactive);
#endif

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}
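	/*
	 * For NCQ, set this command's bit in SActive before loading the
	 * taskfile so the completion path can match the tag against the
	 * hardware's SActive state.
	 */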
	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		dev_dbg(qc->ap->dev,
			"%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
			__func__, tag, qc->ap->link.sactive, sactive);

		ap->ops->sff_tf_load(ap, &qc->tf);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}
	return 0;
}
static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}

static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}
/*
 * scsi mid-layer and libata interface structures
 */
static struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span an 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sata_dwc_ops,
	},
};
static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device_node *np = ofdev->dev.of_node;
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
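	/*
	 * Precompute the physical address of the controller's DMA FIFO
	 * (the dmadr window); dma_dwc_xfer_setup() hands it to the
	 * dmaengine slave config as the device-side address.
	 */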
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET +
		offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
		   idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = &ofdev->dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no SATA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(np, "dmas", NULL)) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(&ofdev->dev, "failed to activate host");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}
static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(&ofdev->dev, "done\n");
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);