alcor.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
 *
 * Driver for Alcor Micro AU6601 and AU6621 controllers
 */

/* Note: this driver was created without any documentation. It is based on
 * sniffing, testing and, in some cases, mimicking the original driver.
 * Should someone with documentation or more experience in SD/MMC or
 * reverse engineering come along, please review this driver and question
 * everything I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

#include <linux/alcor_pci.h>

enum alcor_cookie {
        COOKIE_UNMAPPED,
        COOKIE_PRE_MAPPED,
        COOKIE_MAPPED,
};

struct alcor_pll_conf {
        unsigned int clk_src_freq;
        unsigned int clk_src_reg;
        unsigned int min_div;
        unsigned int max_div;
};

struct alcor_sdmmc_host {
        struct device *dev;
        struct alcor_pci_priv *alcor_pci;

        struct mmc_request *mrq;
        struct mmc_command *cmd;
        struct mmc_data *data;
        unsigned int dma_on:1;

        struct mutex cmd_mutex;

        struct delayed_work timeout_work;

        struct sg_mapping_iter sg_miter; /* SG state for PIO */
        struct scatterlist *sg;
        unsigned int blocks;             /* remaining PIO blocks */
        int sg_count;

        u32 irq_status_sd;
        unsigned char cur_power_mode;
};

static const struct alcor_pll_conf alcor_pll_cfg[] = {
        /* MHZ,         CLK src,                min div, max div */
        { 31250000,     AU6601_CLK_31_25_MHZ,   1,      511},
        { 48000000,     AU6601_CLK_48_MHZ,      1,      511},
        {125000000,     AU6601_CLK_125_MHZ,     1,      511},
        {384000000,     AU6601_CLK_384_MHZ,     1,      511},
};
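
/*
 * Worked example: for a 400 kHz request, the loop in alcor_set_clock()
 * below computes DIV_ROUND_UP(48000000, 400000) = 120 for the 48 MHz
 * source, hitting 400 kHz exactly (deviation 0), which beats e.g.
 * 31.25 MHz / 79 ~= 396 kHz; the winning source/divider pair is then
 * programmed into AU6601_CLK_SELECT.
 */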

static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
                              u8 clear, u8 set)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        u32 var;

        var = alcor_read8(priv, addr);
        var &= ~clear;
        var |= set;
        alcor_write8(priv, var, addr);
}

/* While irqs are masked, some status updates may be missed.
 * Use this with care.
 */
static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;

        alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
}

static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;

        alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
                      AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
                      AU6601_INT_OVER_CURRENT_ERR,
                      AU6601_REG_INT_ENABLE);
}

static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        int i;

        alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
                     AU6601_REG_SW_RESET);
        for (i = 0; i < 100; i++) {
                if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
                        return;
                udelay(50);
        }
        dev_err(host->dev, "%s: timeout\n", __func__);
}

/*
 * Perform DMA I/O of a single page.
 */
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        u32 addr;

        if (!host->sg_count)
                return;

        if (!host->sg) {
                dev_err(host->dev, "have blocks, but no SG\n");
                return;
        }

        if (!sg_dma_len(host->sg)) {
                dev_err(host->dev, "DMA SG len == 0\n");
                return;
        }

        addr = (u32)sg_dma_address(host->sg);

        alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
        host->sg = sg_next(host->sg);
        host->sg_count--;
}
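
/*
 * Only one segment address is ever programmed into AU6601_REG_SDMA_ADDR at
 * a time. On AU6601_INT_DMA_END, alcor_data_irq_done() calls back into
 * alcor_data_set_dma() to queue the next segment, so a multi-segment
 * request proceeds one page per IRQ.
 */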

static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = host->data;
        u8 ctrl = 0;

        if (data->flags & MMC_DATA_WRITE)
                ctrl |= AU6601_DATA_WRITE;

        if (data->host_cookie == COOKIE_MAPPED) {
                /*
                 * For DMA transfers, this function is called just once,
                 * at the start of the operation. The hardware can only
                 * perform DMA I/O on a single page at a time, so here
                 * we kick off the transfer with the first page, and expect
                 * subsequent pages to be transferred upon IRQ events
                 * indicating that the single-page DMA was completed.
                 */
                alcor_data_set_dma(host);
                ctrl |= AU6601_DATA_DMA_MODE;
                host->dma_on = 1;
                /* every mapped segment is exactly 4096 bytes (enforced in
                 * alcor_pre_req()), so this is the total transfer length.
                 */
                alcor_write32(priv, data->sg_count * 0x1000,
                              AU6601_REG_BLOCK_SIZE);
        } else {
                /*
                 * For PIO transfers, we break down each operation
                 * into several sector-sized transfers. When one sector has
                 * completed, the IRQ handler will call this function again
                 * to kick off the transfer of the next sector.
                 */
                alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
        }

        alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
                     AU6601_DATA_XFER_CTRL);
}

static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        size_t blksize, len;
        u8 *buf;

        if (!host->blocks)
                return;

        if (host->dma_on) {
                dev_err(host->dev, "configured DMA but got PIO request.\n");
                return;
        }

        if (!!(host->data->flags & MMC_DATA_READ) != read) {
                dev_err(host->dev, "got unexpected direction %i != %i\n",
                        !!(host->data->flags & MMC_DATA_READ), read);
        }

        if (!sg_miter_next(&host->sg_miter))
                return;

        blksize = host->data->blksz;
        len = min(host->sg_miter.length, blksize);

        dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
                read ? "read" : "write", blksize);

        host->sg_miter.consumed = len;
        host->blocks--;

        buf = host->sg_miter.addr;

        if (read)
                ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
        else
                iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);

        sg_miter_stop(&host->sg_miter);
}
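
/*
 * The buffer FIFO above is accessed as 32-bit words (hence len >> 2); this
 * silently assumes the block size is a multiple of 4 bytes, which holds for
 * the standard 512-byte SD block size.
 */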

static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
{
        unsigned int flags = SG_MITER_ATOMIC;
        struct mmc_data *data = host->data;

        if (data->flags & MMC_DATA_READ)
                flags |= SG_MITER_TO_SG;
        else
                flags |= SG_MITER_FROM_SG;
        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static void alcor_prepare_data(struct alcor_sdmmc_host *host,
                               struct mmc_command *cmd)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct mmc_data *data = cmd->data;

        if (!data)
                return;

        host->data = data;
        host->data->bytes_xfered = 0;
        host->blocks = data->blocks;
        host->sg = data->sg;
        host->sg_count = data->sg_count;
        dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
                host->sg_count, host->blocks);

        if (data->host_cookie != COOKIE_MAPPED)
                alcor_prepare_sg_miter(host);

        alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
}

static void alcor_send_cmd(struct alcor_sdmmc_host *host,
                           struct mmc_command *cmd, bool set_timeout)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        unsigned long timeout = 0;
        u8 ctrl = 0;

        host->cmd = cmd;
        alcor_prepare_data(host, cmd);

        dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg: 0x%08x\n",
                cmd->opcode, cmd->arg);
        /* the command token's first byte is 0b01xxxxxx: start bit (0),
         * transmission bit (1), then the 6-bit command index - hence
         * opcode | 0x40.
         */
        alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
        alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                ctrl = AU6601_CMD_NO_RESP;
                break;
        case MMC_RSP_R1:
                ctrl = AU6601_CMD_6_BYTE_CRC;
                break;
        case MMC_RSP_R1B:
                ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
                break;
        case MMC_RSP_R2:
                ctrl = AU6601_CMD_17_BYTE_CRC;
                break;
        case MMC_RSP_R3:
                ctrl = AU6601_CMD_6_BYTE_WO_CRC;
                break;
        default:
                dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
                        mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
                break;
        }

        if (set_timeout) {
                if (!cmd->data && cmd->busy_timeout)
                        timeout = cmd->busy_timeout;
                else
                        timeout = 10000;
                schedule_delayed_work(&host->timeout_work,
                                      msecs_to_jiffies(timeout));
        }

        dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
        alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
                     AU6601_CMD_XFER_CTRL);
}

static void alcor_request_complete(struct alcor_sdmmc_host *host,
                                   bool cancel_timeout)
{
        struct mmc_request *mrq;

        /*
         * If this work gets rescheduled while running, it will
         * be run again afterwards but without any active request.
         */
        if (!host->mrq)
                return;

        if (cancel_timeout)
                cancel_delayed_work(&host->timeout_work);

        mrq = host->mrq;

        host->mrq = NULL;
        host->cmd = NULL;
        host->data = NULL;
        host->dma_on = 0;

        mmc_request_done(mmc_from_priv(host), mrq);
}

static void alcor_finish_data(struct alcor_sdmmc_host *host)
{
        struct mmc_data *data;

        data = host->data;
        host->data = NULL;
        host->dma_on = 0;

        /*
         * The specification states that the block count register must
         * be updated, but it does not specify at what point in the
         * data flow. That makes the register entirely useless to read
         * back so we have to assume that nothing made it to the card
         * in the event of an error.
         */
        if (data->error)
                data->bytes_xfered = 0;
        else
                data->bytes_xfered = data->blksz * data->blocks;

        /*
         * Need to send CMD12 if -
         * a) open-ended multiblock transfer (no CMD23)
         * b) error in multiblock transfer
         */
        if (data->stop &&
            (data->error ||
             !host->mrq->sbc)) {
                /*
                 * The controller needs a reset of internal state machines
                 * upon error conditions.
                 */
                if (data->error)
                        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

                alcor_unmask_sd_irqs(host);
                alcor_send_cmd(host, data->stop, false);
                return;
        }

        alcor_request_complete(host, 1);
}

static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
        dev_dbg(host->dev, "ERR IRQ %x\n", intmask);

        if (host->cmd) {
                if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
                        host->cmd->error = -ETIMEDOUT;
                else
                        host->cmd->error = -EILSEQ;
        }

        if (host->data) {
                if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
                        host->data->error = -ETIMEDOUT;
                else
                        host->data->error = -EILSEQ;
                host->data->bytes_xfered = 0;
        }

        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
        alcor_request_complete(host, 1);
}

static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
        struct alcor_pci_priv *priv = host->alcor_pci;

        intmask &= AU6601_INT_CMD_END;

        if (!intmask)
                return true;

        /* got CMD_END but no CMD is in progress; wake the thread and
         * process the error
         */
        if (!host->cmd)
                return false;

        if (host->cmd->flags & MMC_RSP_PRESENT) {
                struct mmc_command *cmd = host->cmd;

                cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
                dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
                if (host->cmd->flags & MMC_RSP_136) {
                        cmd->resp[1] =
                                alcor_read32be(priv, AU6601_REG_CMD_RSP1);
                        cmd->resp[2] =
                                alcor_read32be(priv, AU6601_REG_CMD_RSP2);
                        cmd->resp[3] =
                                alcor_read32be(priv, AU6601_REG_CMD_RSP3);
                        dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
                                cmd->resp[1], cmd->resp[2], cmd->resp[3]);
                }
        }

        host->cmd->error = 0;

        /* Processed actual command. */
        if (!host->data)
                return false;

        alcor_trigger_data_transfer(host);
        host->cmd = NULL;
        return true;
}
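
/*
 * Return contract shared by alcor_cmd_irq_done() and alcor_data_irq_done():
 * a true/non-zero result means there is nothing left for the IRQ thread to
 * do, while false/zero tells alcor_irq() to wake the thread for the slow
 * path - request completion and error handling under cmd_mutex.
 */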

static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
        intmask &= AU6601_INT_CMD_END;

        if (!intmask)
                return;

        if (!host->cmd && intmask & AU6601_INT_CMD_END) {
                dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
                        intmask);
        }

        /* Processed actual command. */
        if (!host->data)
                alcor_request_complete(host, 1);
        else
                alcor_trigger_data_transfer(host);
        host->cmd = NULL;
}

static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
{
        u32 tmp;

        intmask &= AU6601_INT_DATA_MASK;

        /* nothing here to do */
        if (!intmask)
                return 1;

        /* we were too fast and got DATA_END after it was processed?
         * let's ignore it for now.
         */
        if (!host->data && intmask == AU6601_INT_DATA_END)
                return 1;

        /* looks like an error, so let's handle it. */
        if (!host->data)
                return 0;

        tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
                         | AU6601_INT_DMA_END);
        switch (tmp) {
        case 0:
                break;
        case AU6601_INT_READ_BUF_RDY:
                alcor_trf_block_pio(host, true);
                return 1;
        case AU6601_INT_WRITE_BUF_RDY:
                alcor_trf_block_pio(host, false);
                return 1;
        case AU6601_INT_DMA_END:
                if (!host->sg_count)
                        break;

                alcor_data_set_dma(host);
                break;
        default:
                dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
                break;
        }

        if (intmask & AU6601_INT_DATA_END) {
                if (!host->dma_on && host->blocks) {
                        alcor_trigger_data_transfer(host);
                        return 1;
                } else {
                        return 0;
                }
        }

        return 1;
}

static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
{
        intmask &= AU6601_INT_DATA_MASK;

        if (!intmask)
                return;

        if (!host->data) {
                dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
                        intmask);
                alcor_reset(host, AU6601_RESET_DATA);
                return;
        }

        if (alcor_data_irq_done(host, intmask))
                return;

        if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
            (host->dma_on && !host->sg_count))
                alcor_finish_data(host);
}

static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
{
        dev_dbg(host->dev, "card %s\n",
                intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");

        if (host->mrq) {
                dev_dbg(host->dev, "cancel all pending tasks.\n");

                if (host->data)
                        host->data->error = -ENOMEDIUM;

                if (host->cmd)
                        host->cmd->error = -ENOMEDIUM;
                else
                        host->mrq->cmd->error = -ENOMEDIUM;

                alcor_request_complete(host, 1);
        }

        mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
}

static irqreturn_t alcor_irq_thread(int irq, void *d)
{
        struct alcor_sdmmc_host *host = d;
        irqreturn_t ret = IRQ_HANDLED;
        u32 intmask, tmp;

        mutex_lock(&host->cmd_mutex);

        intmask = host->irq_status_sd;

        /* something bad happened */
        if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
                dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
                ret = IRQ_NONE;
                goto exit;
        }

        tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
        if (tmp) {
                if (tmp & AU6601_INT_ERROR_MASK)
                        alcor_err_irq(host, tmp);
                else {
                        alcor_cmd_irq_thread(host, tmp);
                        alcor_data_irq_thread(host, tmp);
                }
                intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
        }

        if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
                alcor_cd_irq(host, intmask);
                intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
        }

        if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
                dev_warn(host->dev,
                         "warning: over current detected!\n");
                intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
        }

        if (intmask)
                dev_dbg(host->dev, "got unhandled IRQ: 0x%04x\n", intmask);

exit:
        mutex_unlock(&host->cmd_mutex);
        alcor_unmask_sd_irqs(host);
        return ret;
}

static irqreturn_t alcor_irq(int irq, void *d)
{
        struct alcor_sdmmc_host *host = d;
        struct alcor_pci_priv *priv = host->alcor_pci;
        u32 status, tmp;
        irqreturn_t ret;
        int cmd_done, data_done;

        status = alcor_read32(priv, AU6601_REG_INT_STATUS);
        if (!status)
                return IRQ_NONE;

        alcor_write32(priv, status, AU6601_REG_INT_STATUS);

        tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
                        | AU6601_INT_DATA_END | AU6601_INT_DMA_END
                        | AU6601_INT_CMD_END);
        if (tmp == status) {
                cmd_done = alcor_cmd_irq_done(host, tmp);
                data_done = alcor_data_irq_done(host, tmp);
                /* use fast path for simple tasks */
                if (cmd_done && data_done) {
                        ret = IRQ_HANDLED;
                        goto alcor_irq_done;
                }
        }

        host->irq_status_sd = status;
        ret = IRQ_WAKE_THREAD;
        alcor_mask_sd_irqs(host);

alcor_irq_done:
        return ret;
}
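
/*
 * IRQ flow in short: the hard handler above acks AU6601_REG_INT_STATUS and
 * services plain buffer-ready/DMA-end/CMD-end events inline. Anything else
 * is stashed in host->irq_status_sd with the SD irqs masked; the threaded
 * handler then runs under cmd_mutex and unmasks the irqs on its way out.
 */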

static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        int i, diff = 0x7fffffff, tmp_clock = 0;
        u16 clk_src = 0;
        u8 clk_div = 0;

        if (clock == 0) {
                alcor_write16(priv, 0, AU6601_CLK_SELECT);
                return;
        }

        for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
                unsigned int tmp_div, tmp_diff;
                const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];

                tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
                if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
                        continue;

                tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
                tmp_diff = abs(clock - tmp_clock);

                if (tmp_diff < diff) {
                        diff = tmp_diff;
                        clk_src = cfg->clk_src_reg;
                        clk_div = tmp_div;
                }
        }

        clk_src |= ((clk_div - 1) << 8);
        clk_src |= AU6601_CLK_ENABLE;

        dev_dbg(host->dev, "requested freq %d, actual freq %d, div %d, reg 0x%x\n",
                clock, tmp_clock, clk_div, clk_src);

        alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
}

static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);

        if (ios->timing == MMC_TIMING_LEGACY) {
                alcor_rmw8(host, AU6601_CLK_DELAY,
                           AU6601_CLK_POSITIVE_EDGE_ALL, 0);
        } else {
                alcor_rmw8(host, AU6601_CLK_DELAY,
                           0, AU6601_CLK_POSITIVE_EDGE_ALL);
        }
}

static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct alcor_pci_priv *priv = host->alcor_pci;

        if (ios->bus_width == MMC_BUS_WIDTH_1) {
                alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
                             AU6601_REG_BUS_CTRL);
        } else {
                dev_err(host->dev, "Unknown BUS mode\n");
        }
}

static int alcor_card_busy(struct mmc_host *mmc)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct alcor_pci_priv *priv = host->alcor_pci;
        u8 status;

        /* Check whether DAT[0:3] are low */
        status = alcor_read8(priv, AU6601_DATA_PIN_STATE);

        return !(status & AU6601_BUS_STAT_DAT_MASK);
}

static int alcor_get_cd(struct mmc_host *mmc)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct alcor_pci_priv *priv = host->alcor_pci;
        u8 detect;

        detect = alcor_read8(priv, AU6601_DETECT_STATUS)
                & AU6601_DETECT_STATUS_M;

        /* report whether an SD card is currently detected */
        return (detect == AU6601_SD_DETECTED);
}

static int alcor_get_ro(struct mmc_host *mmc)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct alcor_pci_priv *priv = host->alcor_pci;
        u8 status;

        /* get write protect pin status */
        status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);

        return !!(status & AU6601_SD_CARD_WP);
}

static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);

        mutex_lock(&host->cmd_mutex);

        host->mrq = mrq;

        /* check if card is present; only then send command and data */
        if (alcor_get_cd(mmc)) {
                alcor_send_cmd(host, mrq->cmd, true);
        } else {
                mrq->cmd->error = -ENOMEDIUM;
                alcor_request_complete(host, 1);
        }

        mutex_unlock(&host->cmd_mutex);
}

static void alcor_pre_req(struct mmc_host *mmc,
                          struct mmc_request *mrq)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;
        struct mmc_command *cmd = mrq->cmd;
        struct scatterlist *sg;
        unsigned int i, sg_len;

        if (!data || !cmd)
                return;

        data->host_cookie = COOKIE_UNMAPPED;

        /* FIXME: looks like the DMA engine works only with CMD18 */
        if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
            && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
                return;

        /*
         * We don't do DMA on "complex" transfers, i.e. with
         * non-word-aligned buffers or lengths. A future improvement
         * could be made to use temporary DMA bounce-buffers when these
         * requirements are not met.
         *
         * Also, we don't bother with all the DMA setup overhead for
         * short transfers.
         */
        if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
                return;

        if (data->blksz & 3)
                return;

        for_each_sg(data->sg, sg, data->sg_len, i) {
                if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
                        return;
                if (sg->offset != 0)
                        return;
        }

        /* This data might be unmapped at this time */
        sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
                            mmc_get_dma_dir(data));
        if (sg_len)
                data->host_cookie = COOKIE_MAPPED;

        data->sg_count = sg_len;
}

static void alcor_post_req(struct mmc_host *mmc,
                           struct mmc_request *mrq,
                           int err)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data)
                return;

        if (data->host_cookie == COOKIE_MAPPED) {
                dma_unmap_sg(host->dev,
                             data->sg,
                             data->sg_len,
                             mmc_get_dma_dir(data));
        }

        data->host_cookie = COOKIE_UNMAPPED;
}
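
/*
 * Cookie life cycle: alcor_pre_req() marks a request COOKIE_UNMAPPED and
 * upgrades it to COOKIE_MAPPED only for DMA-eligible transfers (CMD18/CMD25,
 * word-aligned, every segment exactly AU6601_MAX_DMA_BLOCK_SIZE at offset
 * zero). The mapped state selects the DMA path in
 * alcor_trigger_data_transfer(), and alcor_post_req() unmaps the buffers
 * and resets the cookie once the request is done.
 */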

static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);
        struct alcor_pci_priv *priv = host->alcor_pci;

        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                alcor_set_clock(host, ios->clock);
                /* set all pins to input */
                alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
                /* turn off VDD */
                alcor_write8(priv, 0, AU6601_POWER_CONTROL);
                break;
        case MMC_POWER_UP:
                break;
        case MMC_POWER_ON:
                /* This is the trickiest part. The order and timing of these
                 * instructions seem to play an important role. Any change
                 * may confuse the internal state engine of this HW.
                 * FIXME: If we ever get access to documentation, this part
                 * should be reviewed again.
                 */

                /* enable SD card mode */
                alcor_write8(priv, AU6601_SD_CARD,
                             AU6601_ACTIVE_CTRL);
                /* set signal voltage to 3.3V */
                alcor_write8(priv, 0, AU6601_OPT);
                /* no documentation about clk delay; for now just try to
                 * mimic the original driver.
                 */
                alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
                /* set BUS width to 1 bit */
                alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
                /* set CLK for the first time */
                alcor_set_clock(host, ios->clock);
                /* power on VDD */
                alcor_write8(priv, AU6601_SD_CARD,
                             AU6601_POWER_CONTROL);
                /* wait until the CLK gets stable */
                mdelay(20);
                /* set CLK again, mimicking the original driver. */
                alcor_set_clock(host, ios->clock);
                /* enable output */
                alcor_write8(priv, AU6601_SD_CARD,
                             AU6601_OUTPUT_ENABLE);
                /* The clk will not work on au6621 unless we trigger a
                 * data transfer.
                 */
                alcor_write8(priv, AU6601_DATA_WRITE,
                             AU6601_DATA_XFER_CTRL);
                /* configure timeout. Not clear what exactly it means. */
                alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
                mdelay(100);
                break;
        default:
                dev_err(host->dev, "Unknown power parameter\n");
        }
}

static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);

        mutex_lock(&host->cmd_mutex);

        dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
                ios->bus_width, ios->power_mode);

        if (ios->power_mode != host->cur_power_mode) {
                alcor_set_power_mode(mmc, ios);
                host->cur_power_mode = ios->power_mode;
        } else {
                alcor_set_timing(mmc, ios);
                alcor_set_bus_width(mmc, ios);
                alcor_set_clock(host, ios->clock);
        }

        mutex_unlock(&host->cmd_mutex);
}

static int alcor_signal_voltage_switch(struct mmc_host *mmc,
                                       struct mmc_ios *ios)
{
        struct alcor_sdmmc_host *host = mmc_priv(mmc);

        mutex_lock(&host->cmd_mutex);

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
                break;
        case MMC_SIGNAL_VOLTAGE_180:
                alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
                break;
        default:
                /* No signal voltage switch required */
                break;
        }

        mutex_unlock(&host->cmd_mutex);
        return 0;
}

static const struct mmc_host_ops alcor_sdc_ops = {
        .card_busy = alcor_card_busy,
        .get_cd = alcor_get_cd,
        .get_ro = alcor_get_ro,
        .post_req = alcor_post_req,
        .pre_req = alcor_pre_req,
        .request = alcor_request,
        .set_ios = alcor_set_ios,
        .start_signal_voltage_switch = alcor_signal_voltage_switch,
};

static void alcor_timeout_timer(struct work_struct *work)
{
        struct delayed_work *d = to_delayed_work(work);
        struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
                                                     timeout_work);
        mutex_lock(&host->cmd_mutex);

        dev_dbg(host->dev, "triggered timeout\n");
        if (host->mrq) {
                dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");

                if (host->data) {
                        host->data->error = -ETIMEDOUT;
                } else {
                        if (host->cmd)
                                host->cmd->error = -ETIMEDOUT;
                        else
                                host->mrq->cmd->error = -ETIMEDOUT;
                }

                alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
                alcor_request_complete(host, 0);
        }

        mutex_unlock(&host->cmd_mutex);
}

static void alcor_hw_init(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;
        struct alcor_dev_cfg *cfg = priv->cfg;

        /* FIXME: This part mimics the HW init of the original driver.
         * If we ever get access to documentation, this part should be
         * reviewed again.
         */

        /* reset command state engine */
        alcor_reset(host, AU6601_RESET_CMD);

        alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
        /* enable sd card mode */
        alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);

        /* set BUS width to 1 bit */
        alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);

        /* reset data state engine */
        alcor_reset(host, AU6601_RESET_DATA);
        /* Not sure if the voodoo with AU6601_DMA_BOUNDARY is really needed */
        alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);

        alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
        /* not clear what we are doing here. */
        alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
        alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
        alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);

        /* for 6601 - dma_boundary; for 6621 - dma_page_cnt
         * exact meaning of this register is not clear.
         */
        alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);

        /* make sure all pins are set to input and VDD is off */
        alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
        alcor_write8(priv, 0, AU6601_POWER_CONTROL);

        alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);

        /* now we should be safe to enable IRQs */
        alcor_unmask_sd_irqs(host);
}

static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
{
        struct alcor_pci_priv *priv = host->alcor_pci;

        alcor_mask_sd_irqs(host);
        alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);

        alcor_write8(priv, 0, AU6601_DETECT_STATUS);

        alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
        alcor_write8(priv, 0, AU6601_POWER_CONTROL);

        alcor_write8(priv, 0, AU6601_OPT);
}

static void alcor_init_mmc(struct alcor_sdmmc_host *host)
{
        struct mmc_host *mmc = mmc_from_priv(host);

        mmc->f_min = AU6601_MIN_CLOCK;
        mmc->f_max = AU6601_MAX_CLOCK;
        mmc->ocr_avail = MMC_VDD_33_34;
        mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
                | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
                | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
        mmc->caps2 = MMC_CAP2_NO_SDIO;
        mmc->ops = &alcor_sdc_ops;

        /* The hardware does DMA data transfer of 4096 bytes to/from a single
         * buffer address. Scatterlists are not supported at the hardware
         * level, however we can work with them at the driver level,
         * provided that each segment is exactly 4096 bytes in size.
         * Upon DMA completion of a single segment (signalled via IRQ), we
         * immediately proceed to transfer the next segment from the
         * scatterlist.
         *
         * The overall request is limited to 240 sectors, matching the
         * original vendor driver.
         */
        mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
        mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
        mmc->max_blk_count = 240;
        mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;

        dma_set_max_seg_size(host->dev, mmc->max_seg_size);
}
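
/*
 * Sizing sanity check (assuming the MMC core default max_blk_size of 512,
 * which mmc_alloc_host() sets and this driver leaves untouched):
 * max_req_size = 240 * 512 = 122880 bytes, i.e. 120 KiB, which splits into
 * thirty 4 KiB DMA segments.
 */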

static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
{
        struct alcor_pci_priv *priv = pdev->dev.platform_data;
        struct mmc_host *mmc;
        struct alcor_sdmmc_host *host;
        int ret;

        mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
        if (!mmc) {
                dev_err(&pdev->dev, "Can't allocate MMC\n");
                return -ENOMEM;
        }

        host = mmc_priv(mmc);
        host->dev = &pdev->dev;
        host->cur_power_mode = MMC_POWER_UNDEFINED;
        host->alcor_pci = priv;

        /* make sure irqs are disabled */
        alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
        alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);

        ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
                        alcor_irq, alcor_irq_thread, IRQF_SHARED,
                        DRV_NAME_ALCOR_PCI_SDMMC, host);

        if (ret) {
                dev_err(&pdev->dev, "Failed to get irq for data line\n");
                goto free_host;
        }

        mutex_init(&host->cmd_mutex);
        INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);

        alcor_init_mmc(host);
        alcor_hw_init(host);

        dev_set_drvdata(&pdev->dev, host);
        mmc_add_host(mmc);
        return 0;

free_host:
        mmc_free_host(mmc);
        return ret;
}

static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
        struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
        struct mmc_host *mmc = mmc_from_priv(host);

        if (cancel_delayed_work_sync(&host->timeout_work))
                alcor_request_complete(host, 0);

        alcor_hw_uninit(host);
        mmc_remove_host(mmc);
        mmc_free_host(mmc);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int alcor_pci_sdmmc_suspend(struct device *dev)
{
        struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

        if (cancel_delayed_work_sync(&host->timeout_work))
                alcor_request_complete(host, 0);

        alcor_hw_uninit(host);

        return 0;
}

static int alcor_pci_sdmmc_resume(struct device *dev)
{
        struct alcor_sdmmc_host *host = dev_get_drvdata(dev);

        alcor_hw_init(host);

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
                         alcor_pci_sdmmc_resume);

static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
        {
                .name = DRV_NAME_ALCOR_PCI_SDMMC,
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);

static struct platform_driver alcor_pci_sdmmc_driver = {
        .probe = alcor_pci_sdmmc_drv_probe,
        .remove = alcor_pci_sdmmc_drv_remove,
        .id_table = alcor_pci_sdmmc_ids,
        .driver = {
                .name = DRV_NAME_ALCOR_PCI_SDMMC,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
                .pm = &alcor_mmc_pm_ops
        },
};
module_platform_driver(alcor_pci_sdmmc_driver);

MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");