sdhci.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <phys2bus.h>

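/*
 * Issue a software reset for the bits in @mask via SDHCI_SOFTWARE_RESET and
 * poll (up to ~100 ms) until the controller clears them again.
 */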
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

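/*
 * Read the command response from the controller's response registers into
 * cmd->response[]; 136-bit responses are shifted and stitched across
 * register boundaries because the CRC byte is stripped by the controller.
 */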
static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;

	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3 - i) * 4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3 - i) * 4 - 1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

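/*
 * Move one block of data through the 32-bit SDHCI_BUFFER data port, one word
 * at a time, in the direction given by data->flags.
 */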
static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;

	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
static void sdhci_adma_desc(struct sdhci_host *host, dma_addr_t dma_addr,
			    u16 len, bool end)
{
	struct sdhci_adma_desc *desc;
	u8 attr;

	desc = &host->adma_desc_table[host->desc_slot];

	attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
	if (!end)
		host->desc_slot++;
	else
		attr |= ADMA_DESC_ATTR_END;

	desc->attr = attr;
	desc->len = len;
	desc->reserved = 0;
	desc->addr_lo = lower_32_bits(dma_addr);
#ifdef CONFIG_DMA_ADDR_T_64BIT
	desc->addr_hi = upper_32_bits(dma_addr);
#endif
}

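/*
 * Build the ADMA2 descriptor chain for the transfer: one descriptor per
 * ADMA_MAX_LEN chunk, the last one marked as the end descriptor, then flush
 * the table from the CPU cache so the controller sees it.
 */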
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{
	uint trans_bytes = data->blocksize * data->blocks;
	uint desc_count = DIV_ROUND_UP(trans_bytes, ADMA_MAX_LEN);
	int i = desc_count;
	dma_addr_t dma_addr = host->start_addr;

	host->desc_slot = 0;

	while (--i) {
		sdhci_adma_desc(host, dma_addr, ADMA_MAX_LEN, false);
		dma_addr += ADMA_MAX_LEN;
		trans_bytes -= ADMA_MAX_LEN;
	}

	sdhci_adma_desc(host, dma_addr, trans_bytes, true);

	flush_cache((dma_addr_t)host->adma_desc_table,
		    ROUND(desc_count * sizeof(struct sdhci_adma_desc),
			  ARCH_DMA_MINALIGN));
}
#elif defined(CONFIG_MMC_SDHCI_SDMA)
static void sdhci_prepare_adma_table(struct sdhci_host *host,
				     struct mmc_data *data)
{}
#endif

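/*
 * Program the DMA engine for a transfer: select SDMA/ADMA32/ADMA64 in the
 * host control register, fall back to the bounce buffer for SDMA when the
 * caller's buffer is not suitably aligned, map the buffer for DMA and write
 * the start address (or the ADMA descriptor table address) to the controller.
 */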
#if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{
	unsigned char ctrl;
	void *buf;

	if (data->flags == MMC_DATA_READ)
		buf = data->dest;
	else
		buf = (void *)data->src;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & USE_ADMA64)
		ctrl |= SDHCI_CTRL_ADMA64;
	else if (host->flags & USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (host->flags & USE_SDMA &&
	    (host->force_align_buffer ||
	     (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
	      ((unsigned long)buf & 0x7) != 0x0))) {
		*is_aligned = 0;
		if (data->flags != MMC_DATA_READ)
			memcpy(host->align_buffer, buf, trans_bytes);
		buf = host->align_buffer;
	}

	host->start_addr = dma_map_single(buf, trans_bytes,
					  mmc_get_dma_dir(data));

	if (host->flags & USE_SDMA) {
		sdhci_writel(host, phys_to_bus((ulong)host->start_addr),
			     SDHCI_DMA_ADDRESS);
	} else if (host->flags & (USE_ADMA | USE_ADMA64)) {
		sdhci_prepare_adma_table(host, data);

		sdhci_writel(host, lower_32_bits(host->adma_addr),
			     SDHCI_ADMA_ADDRESS);
		if (host->flags & USE_ADMA64)
			sdhci_writel(host, upper_32_bits(host->adma_addr),
				     SDHCI_ADMA_ADDRESS_HI);
	}
}
#else
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{}
#endif

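/*
 * Wait for the data phase to finish. For PIO, service the buffer-ready
 * interrupts block by block; for SDMA, re-arm the DMA address register at
 * every boundary interrupt. Returns 0 on SDHCI_INT_DATA_END, -EIO on an
 * error interrupt, or -ETIMEDOUT if the transfer never completes.
 */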
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
	dma_addr_t start_addr = host->start_addr;
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
		if ((host->flags & USE_DMA) && !transfer_done &&
		    (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			if (host->flags & USE_SDMA) {
				start_addr &=
					~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				sdhci_writel(host, phys_to_bus((ulong)start_addr),
					     SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
			 mmc_get_dma_dir(data));

	return 0;
}

/*
 * The driver will not send a command while the card is busy, so it must wait
 * for the card to become ready first.
 * Whenever the card is still busy when the current timeout expires, the
 * timeout value is doubled, up to the globally defined maximum.
 * Each subsequent call reuses the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000

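/*
 * Send one MMC command (and optional data transfer) and wait for completion.
 * Built with either the driver-model or the legacy prototype, depending on
 * CONFIG_DM_MMC.
 */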
#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	host->start_addr = 0;

	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling
	 */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		if (data)
			mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set the transfer mode according to the data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);

		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

		if (host->flags & USE_DMA) {
			mode |= SDHCI_TRNS_DMA;
			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
		}

		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
						    data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, host->align_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}
	return 0;
}
#endif

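/*
 * Set the SD clock: wait for CMD/DAT inhibit to clear, pick a divisor for the
 * requested rate (power-of-two divisors on spec 2.00 hosts, even divisors or
 * programmable clock mode on spec 3.00+), then enable the internal clock,
 * wait for it to stabilise and finally gate it through to the card.
 */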
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
	       (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout to wait cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}

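/*
 * Translate the selected voltage bit (an MMC_VDD_* bit number) into the
 * matching SDHCI_POWER_* setting and switch bus power on, or turn bus power
 * off when no usable voltage was given.
 */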
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

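/*
 * Program the UHS mode select field in HOST_CONTROL2 to match the currently
 * selected bus speed mode, falling back to SDR12 for legacy modes.
 */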
void sdhci_set_uhs_timing(struct sdhci_host *host)
{
	struct mmc *mmc = host->mmc;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	reg &= ~SDHCI_CTRL_UHS_MASK;

	switch (mmc->selected_mode) {
	case UHS_SDR50:
	case MMC_HS_52:
		reg |= SDHCI_CTRL_UHS_SDR50;
		break;
	case UHS_DDR50:
	case MMC_DDR_52:
		reg |= SDHCI_CTRL_UHS_DDR50;
		break;
	case UHS_SDR104:
	case MMC_HS_200:
		reg |= SDHCI_CTRL_UHS_SDR104;
		break;
	default:
		reg |= SDHCI_CTRL_UHS_SDR12;
	}

	sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
}

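/*
 * Apply the requested I/O settings: update the clock, bus width and
 * high-speed bit, then give the platform driver a chance to run its own
 * post-configuration hook.
 */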
#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;
	bool no_hispd_bit = false;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
		    (host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
		ctrl &= ~SDHCI_CTRL_HISPD;
		no_hispd_bit = true;
	}

	if (!no_hispd_bit) {
		if (mmc->selected_mode == MMC_HS ||
		    mmc->selected_mode == SD_HS ||
		    mmc->selected_mode == MMC_DDR_52 ||
		    mmc->selected_mode == MMC_HS_200 ||
		    mmc->selected_mode == MMC_HS_400 ||
		    mmc->selected_mode == UHS_SDR25 ||
		    mmc->selected_mode == UHS_SDR50 ||
		    mmc->selected_mode == UHS_SDR104 ||
		    mmc->selected_mode == UHS_DDR50)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		return host->ops->set_ios_post(host);

	return 0;
}

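/*
 * One-time controller initialisation: request the card-detect GPIO when
 * available, reset the controller, set up the SDMA bounce buffer if required
 * by the 32-bit DMA quirk, apply bus power and configure the interrupt masks.
 */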
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
	struct udevice *dev = mmc->dev;

	gpio_request_by_name(dev, "cd-gpios", 0,
			     &host->cd_gpio, GPIOD_IS_IN);
#endif

	sdhci_reset(host, SDHCI_RESET_ALL);

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
	host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
	/*
	 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
	 * is defined.
	 */
	host->force_align_buffer = true;
#else
	if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
		host->align_buffer = memalign(8, 512 * 1024);
		if (!host->align_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}
#endif

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

static int sdhci_deferred_probe(struct udevice *dev)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->deferred_probe) {
		err = host->ops->deferred_probe(host);
		if (err)
			return err;
	}
	return 0;
}

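/*
 * Report card presence: non-removable and polled hosts are always treated as
 * present; otherwise consult the card-detect GPIO (when driver-model GPIO is
 * enabled) and finally the controller's present-state register.
 */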
static int sdhci_get_cd(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	int value;

	/* If nonremovable, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
		return 1;
	/* If polling, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
		return 1;

#if CONFIG_IS_ENABLED(DM_GPIO)
	value = dm_gpio_get_value(&host->cd_gpio);
	if (value >= 0) {
		if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
			return !value;
		else
			return value;
	}
#endif
	value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		   SDHCI_CARD_PRESENT);
	if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
		return !value;
	else
		return value;
}

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.deferred_probe	= sdhci_deferred_probe,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= sdhci_execute_tuning,
#endif
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif

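/*
 * Fill in the mmc_config from the controller's capability registers (masked
 * and extended by the optional "sdhci-caps"/"sdhci-caps-mask" device tree
 * properties), derive the supported clock range, voltages and bus modes, and
 * allocate the ADMA descriptor table when ADMA is enabled.
 */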
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		    u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;
#if CONFIG_IS_ENABLED(DM_MMC)
	u64 dt_caps, dt_caps_mask;

	dt_caps_mask = dev_read_u64_default(host->mmc->dev,
					    "sdhci-caps-mask", 0);
	dt_caps = dev_read_u64_default(host->mmc->dev,
				       "sdhci-caps", 0);
	caps = ~(u32)dt_caps_mask &
	       sdhci_readl(host, SDHCI_CAPABILITIES);
	caps |= (u32)dt_caps;
#else
	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
#endif
	debug("%s, caps: 0x%x\n", __func__, caps);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if ((caps & SDHCI_CAN_DO_SDMA)) {
		host->flags |= USE_SDMA;
	} else {
		debug("%s: Your controller doesn't support SDMA!!\n",
		      __func__);
	}
#endif

#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
	if (!(caps & SDHCI_CAN_DO_ADMA2)) {
		printf("%s: Your controller doesn't support ADMA2!!\n",
		       __func__);
		return -EINVAL;
	}
	host->adma_desc_table = memalign(ARCH_DMA_MINALIGN, ADMA_TABLE_SZ);
	host->adma_addr = (dma_addr_t)host->adma_desc_table;

#ifdef CONFIG_DMA_ADDR_T_64BIT
	host->flags |= USE_ADMA64;
#else
	host->flags |= USE_ADMA;
#endif
#endif

	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
#if CONFIG_IS_ENABLED(DM_MMC)
		caps_1 = ~(u32)(dt_caps_mask >> 32) &
			 sdhci_readl(host, SDHCI_CAPABILITIES_1);
		caps_1 |= (u32)(dt_caps >> 32);
#else
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
#endif
		debug("%s, caps_1: 0x%x\n", __func__, caps_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

	/* Since Host Controller Version 3.0 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (!(cfg->voltages & MMC_VDD_165_195))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

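/*
 * With CONFIG_BLK the MMC device is bound through driver model; without it,
 * add_sdhci() creates the mmc device directly from the filled-in config.
 */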
#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif