dw_mmc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <common.h>
#include <cpu_func.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096

static inline int __test_and_clear_bit_1(int nr, void *addr)
{
	int mask, retval;
	unsigned int *a = (unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
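
/*
 * Request a controller reset by writing @value to CTRL, then poll until the
 * reset bits have cleared. Returns 1 on success and 0 if the reset did not
 * complete within the bounded loop.
 */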
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}

	return 0;
}
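
/*
 * Fill one internal-DMA (IDMAC) descriptor: desc0 carries the control/status
 * flags, desc1 the buffer byte count and desc2 the buffer address. The
 * next-descriptor pointer is set to the descriptor immediately following this
 * one in memory, so callers build a simple linear chain.
 */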
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}
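
/*
 * Build the IDMAC descriptor chain for a transfer. Each descriptor covers at
 * most 8 blocks and the bounce buffer is walked in PAGE_SIZE (4 KiB) strides,
 * which matches 8 blocks of the usual 512-byte block size. The first
 * descriptor gets the FS flag, the last one the LD flag. The chain is flushed
 * from the data cache before IDMAC/DMA mode is enabled in CTRL and BMOD.
 */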
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
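
/*
 * Estimate a data-transfer timeout in milliseconds: ten times the nominal
 * transfer time of @size bytes at the current clock and bus width (halved
 * again for DDR), clamped to at least 1000 ms. As an arbitrary worked
 * example: 64 KiB on a 1-bit bus at 400 kHz gives
 * 65536 * 8 * 10 / 400000 * 1000 = 13000 ms; at higher clocks the integer
 * division typically rounds to zero and the 1000 ms floor applies.
 */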
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}
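
/*
 * Poll RINTSTS until the transfer completes, errors out or times out. In
 * FIFO (PIO) mode the data is moved through the DATA register here: reads
 * drain the FIFO using the fill level reported in STATUS, writes top it up
 * based on the free space derived from the FIFO depth. fifo_depth is
 * reconstructed from the RX watermark programmed into FIFOTH, i.e.
 * (RX_WMARK + 1) * 2 words. In DMA mode the IDMAC moves the data and this
 * loop only waits for completion or errors.
 */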
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO))) {
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}
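
/*
 * Send a command and, if present, run its data stage. The flow is: wait for
 * the card to leave the busy state, set up the data path (FIFO registers in
 * PIO mode, bounce buffer plus IDMAC descriptors in DMA mode), issue the
 * command, poll RINTSTS for command completion and response errors, read
 * back the response registers, then hand any data phase to
 * dwmci_data_transfer() and, in DMA mode, wait for the IDMAC to finish
 * before tearing the bounce buffer down.
 */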
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	if (__test_and_clear_bit_1(DW_MMC_CARD_NEED_INIT, &host->flags))
		flags |= DWMCI_CMD_SEND_INIT;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only required in DMA mode */
		if (!host->fifo_mode) {
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;

			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* Clear interrupts */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);

			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
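
/*
 * Program the card clock. The source clock comes from host->get_mmc_clk()
 * when provided, otherwise from host->bus_hz. CLKDIV divides the source
 * clock by 2 * div (div == 0 means bypass). As an arbitrary worked example:
 * with a 200 MHz source and a requested 400 kHz card clock,
 * div = DIV_ROUND_UP(200000000, 2 * 400000) = 250, giving
 * 200 MHz / (2 * 250) = 400 kHz. Each divider/enable update is pushed to the
 * card-clock domain with an UPD_CLK command.
 */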
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is the source clock value.
	 * host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}

static void dw_mci_hs_set_bits(struct dwmci_host *host, u32 smpl_phase)
{
	/* Change the driver phase and sample phase */
	u32 mask = 0x1f;
	u32 reg_value;

	reg_value = dwmci_readl(host, DWMCI_UHS_REG_EXT);

	/* In UHS_REG_EXT, only 5 bits are valid in DRV_PHASE and SMPL_PHASE */
	reg_value &= ~(mask << 16);
	reg_value |= (smpl_phase << 16);
	dwmci_writel(host, DWMCI_UHS_REG_EXT, reg_value);

	/* Delay 1 ms to let the new timing settings take effect */
	udelay(1000);
}
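
/*
 * Scan all 32 sample-phase settings with the tuning command, record the first
 * passing phase and the phase where results start failing again, and program
 * the midpoint of that passing window. If no phase passes, fall back to
 * phase 0 and report an error.
 */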
static int dwmci_execute_tuning(struct udevice *dev, uint opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct dwmci_host *host = mmc->priv;
	int err = -1;
	int smpl_phase, smpl_raise = -1, smpl_fall = -1;
	int i;

	for (i = 0; i < 32; ++i) {
		smpl_phase = i;
		dw_mci_hs_set_bits(host, smpl_phase);
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

		err = mmc_send_tuning(mmc, opcode, NULL);

		if (!err && smpl_raise < 0) {
			smpl_raise = i;
		} else if (err && smpl_raise >= 0) {
			smpl_fall = i - 1;
			break;
		}
	}

	if (i >= 32 && smpl_raise >= 0)
		smpl_fall = 31;

	if (smpl_raise < 0) {
		pr_err("No valid delay chain! use default\n");
		dw_mci_hs_set_bits(host, 0);
		err = -EINVAL;
	} else {
		smpl_phase = (smpl_raise + smpl_fall) / 2;
		dw_mci_hs_set_bits(host, smpl_phase);
		pr_debug("Found valid delay chain! use it [delay=%d]\n", smpl_phase);
		err = 0;
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	return err;
}
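
/*
 * Bring the controller to a known state: optional board hook, power on, full
 * reset, interrupts masked and cleared, maximum data timeout, and an IDMAC
 * software reset via BMOD. If no FIFOTH value was supplied by the platform,
 * derive RX/TX watermarks from the reset value of FIFOTH (which encodes the
 * FIFO depth). IDMAC interrupts are only enabled when not running in FIFO
 * (PIO) mode.
 */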
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	host->flags = 1 << DW_MMC_CARD_NEED_INIT;

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.execute_tuning	= dwmci_execute_tuning,
};
#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
};
#endif

void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}

#if CONFIG_IS_ENABLED(TARGET_STARFIVE_VISIONFIVE2)
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_HS200;
#else
	cfg->host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS200;
#endif

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
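
/*
 * Usage note (illustrative only): a DM platform glue driver typically embeds
 * a struct dwmci_host in its private/platform data, fills in name, ioaddr,
 * bus width and clock information, then calls dwmci_setup_cfg() and
 * dwmci_bind() from its bind path and dwmci_probe() from its probe path.
 * A minimal sketch with hypothetical names (my_plat, my_dwmmc_*), assuming
 * CONFIG_DM_MMC and CONFIG_BLK, and arbitrary example clock values:
 *
 *	static int my_dwmmc_bind(struct udevice *dev)
 *	{
 *		struct my_plat *plat = dev_get_plat(dev);
 *
 *		return dwmci_bind(dev, &plat->mmc, &plat->cfg);
 *	}
 *
 *	static int my_dwmmc_probe(struct udevice *dev)
 *	{
 *		struct my_plat *plat = dev_get_plat(dev);
 *		struct dwmci_host *host = &plat->host;
 *
 *		host->name = dev->name;
 *		host->ioaddr = dev_read_addr_ptr(dev);
 *		host->buswidth = 4;
 *		host->bus_hz = 50000000;
 *		host->priv = dev;
 *
 *		dwmci_setup_cfg(&plat->cfg, host, host->bus_hz, 400000);
 *		host->mmc = &plat->mmc;
 *		host->mmc->priv = host;
 *		host->mmc->dev = dev;
 *
 *		return dwmci_probe(dev);
 *	}
 */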