// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Portions copyright (C) 2003 Russell King, PXA MMCI Driver
 * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver
 *
 * Copyright 2008 Embedded Alley Solutions, Inc.
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ | \
				 BM_SSP_CTRL1_RESP_ERR_IRQ | \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_CRC_IRQ | \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT	(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp		ssp;

	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned char		bus_width;
	spinlock_t		lock;
	int			sdio_irq_en;
	bool			broken_cd;
};

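/*
 * Card-detect callback: prefer a GPIO provided via the slot-gpio helpers,
 * otherwise fall back to the SSP controller's CARD_DETECT status bit,
 * honouring "broken-cd" and active-high card detect.
 */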
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int present, ret;

	if (host->broken_cd)
		return -ENOSYS;

	ret = mmc_gpio_get_cd(mmc);
	if (ret >= 0)
		return ret;

	present = mmc->caps & MMC_CAP_NEEDS_POLL ||
		!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_CARD_DETECT);

	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		present = !present;

	return present;
}

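/*
 * Soft-reset the SSP block and reprogram CTRL0/CTRL1/TIMING for SD/MMC
 * mode with DMA and the error/timeout interrupts enabled.
 */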
static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));

	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

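/*
 * Completion path: read back the command response registers, unmap any
 * data scatterlist, and either chain the next command of the request
 * (the command after CMD23, or the STOP command) or hand the finished
 * request back to the mmc core.
 */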
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (cmd == mrq->sbc) {
		/* Finished CMD23, now send actual command. */
		mxs_mmc_start_cmd(host, mrq->cmd);
		return;
	} else if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * If there was an error on any block, we mark all
		 * data blocks as being in error.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (data->stop && (data->error || !mrq->sbc)) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

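/*
 * SSP error/SDIO interrupt handler: acknowledge the CTRL1 status bits and
 * translate them into cmd->error / data->error codes for the transfer in
 * flight. Normal completion is reported via the DMA callback above.
 */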
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

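/*
 * Build a dmaengine descriptor for the next transfer stage. For a data
 * stage the mmc scatterlist is mapped and used directly; for a command
 * (PIO) stage the three SSP PIO words (CTRL0/CMD0/CMD1) are handed to the
 * mxs-dma driver as a pseudo scatterlist with DMA_TRANS_NONE, which the
 * DMA engine writes into the SSP registers before the transfer.
 */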
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* pio */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

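/* Issue a broadcast command with no response and no data phase, e.g. CMD0. */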
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

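/*
 * Issue an addressed (or broadcast-with-response) command without a data
 * phase and collect its response.
 */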
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

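/*
 * Convert a data timeout in nanoseconds into SSP timeout ticks; the
 * TIMING_TIMEOUT field counts in units of 4096 SSP clock cycles.
 */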
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Calculate ticks in ms since ns are large numbers
	 * and might overflow
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);

	return ssp_ticks;
}

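/*
 * Issue an addressed data-transfer command: program the transfer size,
 * block size/count and the data timeout, then queue two DMA stages, the
 * PIO command stage followed by the data scatterlist.
 */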
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* get logarithm to base 2 of block size for setting register */
	log2_blksz = ilog2(data->blksz);

	/*
	 * take special care of the case that data size from data->sg
	 * is not equal to blocks x blksz
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count need to be set differently */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if (cmd->opcode == SD_IO_RW_EXTENDED)
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the timeout count */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* pio */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* append data sg */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

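/* Dispatch a command to the bc/ac/adtc helpers based on its MMC command type. */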
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;

	if (mrq->sbc)
		mxs_mmc_start_cmd(host, mrq->sbc);
	else
		mxs_mmc_start_cmd(host, mrq->cmd);
}

static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

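/*
 * Enable or disable SDIO card interrupt reporting in CTRL0/CTRL1 and
 * signal an interrupt that may already be pending in the status register.
 */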
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct platform_device_id mxs_ssp_ids[] = {
	{
		.name = "imx23-mmc",
		.driver_data = IMX23_SSP,
	}, {
		.name = "imx28-mmc",
		.driver_data = IMX28_SSP,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

static void mxs_mmc_regulator_disable(void *regulator)
{
	regulator_disable(regulator);
}

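/*
 * Probe: map the SSP registers, enable the optional vmmc regulator and the
 * SSP clock, reset the block, request the "rx-tx" DMA channel, then set up
 * the mmc core parameters and register the host.
 */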
static int mxs_mmc_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	ssp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id) of_id->data;

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}

		ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
					       reg_vmmc);
		if (ret)
			goto out_mmc_free;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(ssp->dmach)) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = PTR_ERR(ssp->dmach);
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free_dma;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	clk_disable_unprepare(ssp->clk);
	return 0;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return clk_prepare_enable(ssp->clk);
}
#endif

static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);

static struct platform_driver mxs_mmc_driver = {
	.probe = mxs_mmc_probe,
	.remove = mxs_mmc_remove,
	.id_table = mxs_ssp_ids,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = &mxs_mmc_pm_ops,
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);