// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_CACHE_FLUSH_TIMEOUT_MS	(30 * 1000) /* 30s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
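
/*
 * Example (illustrative sketch, not part of the original file): callers
 * commonly decode the returned R1 status word with the R1_CURRENT_STATE()
 * macro, e.g. to confirm the card is back in the transfer state before
 * issuing data commands:
 *
 *	u32 status;
 *	int err = mmc_send_status(card, &status);
 *
 *	if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 *		;	// card is ready for the next data transfer
 */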

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);

		/*
		 * According to eMMC specification v5.1 section 6.4.3, we
		 * should issue CMD1 repeatedly in the idle state until
		 * the eMMC is ready. Otherwise some eMMC devices seem to enter
		 * the inactive mode after mmc_init_card() issued CMD0 when
		 * the eMMC device is busy.
		 */
		if (!ocr && !mmc_host_is_spi(host))
			cmd.arg = cmd.resp[0] | BIT(30);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller is required to pass either a DMA-capable buffer or an
 * on-stack buffer in @buf (the latter incurs some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
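
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller reads the whole EXT_CSD, picks out the byte it needs and frees the
 * buffer again. EXT_CSD_REV is one index the core really uses; error
 * handling beyond this is up to the caller.
 *
 *	u8 *ext_csd;
 *	int err = mmc_get_ext_csd(card, &ext_csd);
 *
 *	if (err)
 *		return err;
 *	pr_info("%s: EXT_CSD rev %u\n", mmc_hostname(card->host),
 *		ext_csd[EXT_CSD_REV]);
 *	kfree(ext_csd);
 */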

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (R1_STATUS(status))
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

/* Caller must hold re-tuning */
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

static int mmc_busy_status(struct mmc_card *card, bool retry_crc_err,
			   enum mmc_busy_cmd busy_cmd, bool *busy)
{
	struct mmc_host *host = card->host;
	u32 status = 0;
	int err;

	if (host->ops->card_busy) {
		*busy = host->ops->card_busy(host);
		return 0;
	}

	err = mmc_send_status(card, &status);
	if (retry_crc_err && err == -EILSEQ) {
		*busy = true;
		return 0;
	}
	if (err)
		return err;

	switch (busy_cmd) {
	case MMC_BUSY_CMD6:
		err = mmc_switch_status_error(card->host, status);
		break;
	case MMC_BUSY_ERASE:
		err = R1_STATUS(status) ? -EIO : 0;
		break;
	case MMC_BUSY_HPI:
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	*busy = !mmc_ready_for_data(status);
	return 0;
}

static int __mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			       bool send_status, bool retry_crc_err,
			       enum mmc_busy_cmd busy_cmd)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	unsigned int udelay = 32, udelay_max = 32768;
	bool expired = false;
	bool busy = false;

	/*
	 * If we aren't allowed to poll by using CMD13 and the host isn't
	 * capable of polling by using ->card_busy(), rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);

		err = mmc_busy_status(card, retry_crc_err, busy_cmd, &busy);
		if (err)
			return err;

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
				mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}

		/* Throttle the polling rate to avoid hogging the CPU. */
		if (busy) {
			usleep_range(udelay, udelay * 2);
			if (udelay < udelay_max)
				udelay *= 2;
		}
	} while (busy);

	return 0;
}

int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
		      enum mmc_busy_cmd busy_cmd)
{
	return __mmc_poll_for_busy(card, timeout_ms, true, false, busy_cmd);
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if a CRC error occurs when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms, unsigned char timing,
		bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = true;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * If the max_busy_timeout of the host is specified, make sure it's
	 * enough to fit the used timeout_ms. In case it's not, let's instruct
	 * the host to avoid HW busy detection, by converting to a R1 response
	 * instead of a R1B. Note, some hosts require R1B, which also means
	 * they are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* If SPI, or if HW busy detection was used above, we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = __mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
				  MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to the new timing before checking the switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
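
/*
 * Example (illustrative, hedged): the most common use of mmc_switch() in the
 * core is a one-byte EXT_CSD write, e.g. selecting an 8-bit bus during card
 * init. The generic CMD6 timeout is used when no register-specific timeout
 * applies:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_8,
 *			 card->ext_csd.generic_cmd6_time);
 */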

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
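
/*
 * Example (illustrative sketch): a host driver's ->execute_tuning() callback
 * typically sweeps its sample-delay taps and calls mmc_send_tuning() at each
 * setting, keeping a tap for which the pattern reads back intact. The
 * my_set_delay_tap() helper and NUM_TAPS below are hypothetical; real
 * drivers program controller-specific delay registers here.
 *
 *	for (tap = 0; tap < NUM_TAPS; tap++) {
 *		my_set_delay_tap(host, tap);
 *		if (!mmc_send_tuning(host, opcode, NULL))
 *			;	// remember this tap as usable
 *	}
 */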

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
	 * is a problem. This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}

static int mmc_send_hpi_cmd(struct mmc_card *card)
{
	unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
	struct mmc_host *host = card->host;
	bool use_r1b_resp = true;
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = card->ext_csd.hpi_cmd;
	cmd.arg = card->rca << 16 | 1;

	/*
	 * Make sure the host's max_busy_timeout fits the needed timeout for
	 * HPI. In case it doesn't, let's instruct the host to avoid HW busy
	 * detection, by using a R1 response instead of R1B.
	 */
	if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout)
		use_r1b_resp = false;

	if (cmd.opcode == MMC_STOP_TRANSMISSION && use_r1b_resp) {
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout_ms;
	} else {
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		use_r1b_resp = false;
	}

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err) {
		pr_warn("%s: HPI error %d. Command response %#x\n",
			mmc_hostname(host), err, cmd.resp[0]);
		return err;
	}

	/* No need to poll when using HW busy detection. */
	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
		return 0;

	/* Let's poll to find out when the HPI request completes. */
	return mmc_poll_for_busy(card, busy_timeout_ms, MMC_BUSY_HPI);
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status until it
 * is out of the programming state.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card);
out:
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	err = mmc_get_ext_csd(card, &ext_csd);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards that have manual BKOPS
 * enabled and that report an urgent BKOPS level.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), execute synchronously.
	 * In the future, we may consider starting BKOPS for the less urgent
	 * levels from an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	if (err)
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);

	mmc_retune_release(card->host);
}
EXPORT_SYMBOL(mmc_run_bkops);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_cache_enabled(card->host)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
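
/*
 * Example (illustrative, hedged): command queuing has to be switched off for
 * requests the queue engine cannot handle; the block layer does this around
 * e.g. RPMB accesses and ioctl paths. A caller would bracket such an
 * operation roughly like:
 *
 *	err = mmc_cmdq_disable(card);
 *	if (err)
 *		return err;
 *	// ... issue the non-CQE operation ...
 *	if (card->reenable_cmdq)
 *		err = mmc_cmdq_enable(card);
 */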

int mmc_sanitize(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
		return -EOPNOTSUPP;
	}

	pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));

	mmc_retune_hold(host);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
			 1, MMC_SANITIZE_TIMEOUT_MS);
	if (err)
		pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);

	/*
	 * If the sanitize operation timed out, the card is probably still busy
	 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));

	mmc_retune_release(host);

	pr_debug("%s: Sanitize completed\n", mmc_hostname(host));

	return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);