// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/core/core.c
 *
 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "card.h"
#include "crypto.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */
#define SD_DISCARD_TIMEOUT_MS	(250)

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
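
/*
 * Usage note (editor's sketch, not from the original source): core.c is
 * built into the mmc_core module, so this parameter can typically be set at
 * boot with "mmc_core.use_spi_crc=0" on the kernel command line, trading
 * CRC protection for SPI throughput.
 */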

static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq for two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}
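
/*
 * Example (illustrative, an editor's sketch): card-detect polling reschedules
 * itself through this helper, e.g. mmc_schedule_delayed_work(&host->detect, HZ)
 * queues the detect work roughly one second from now on the freezable
 * workqueue, so it pauses cleanly across system suspend.
 */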

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if ((cmd && cmd->error) || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
	    !host->retune_crc_disable &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	     (mrq->data && mrq->data->error == -EILSEQ) ||
	     (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->sbc->opcode,
				 mrq->sbc->error,
				 mrq->sbc->resp[0], mrq->sbc->resp[1],
				 mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_request_done);

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For SDIO read/write commands, we must wait while the card is busy;
	 * otherwise some SDIO devices won't work properly.
	 * I/O abort, reset and bus suspend operations are bypassed.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx. 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}

static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}

static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
	unsigned int i, sz = 0;
	struct scatterlist *sg;

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;

		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;

		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}

int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->cmd_completion);

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}

void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);

/*
 * mmc_cqe_start_req - Start a CQE request.
 * @host: MMC host to start the request
 * @mrq: request to start
 *
 * Start the request, re-tuning first if it is needed and possible. Returns an
 * error code if the request fails to start or -EBUSY if CQE is busy.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/*
	 * CQE cannot process re-tuning commands. Caller must hold retuning
	 * while CQE is in use. Re-tuning can happen here only when CQE has no
	 * active requests i.e. this is the first. Note, re-tuning will call
	 * ->cqe_off().
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);

/**
 * mmc_cqe_request_done - CQE has finished processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * CQE drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s: %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);

/**
 * mmc_cqe_post_req - CQE post process of a completed MMC request
 * @host: MMC host
 * @mrq: MMC request to be processed
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
		host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);

/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT	1000

/*
 * mmc_cqe_recovery - Recover from CQE errors.
 * @host: MMC host to recover
 *
 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 * in eMMC, and discarding the queue in CQE. CQE must call
 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 * fails to discard its queue.
 */
int mmc_cqe_recovery(struct mmc_host *host)
{
	struct mmc_command cmd;
	int err;

	mmc_retune_hold_now(host);

	/*
	 * Recovery is expected seldom, if at all, but it reduces performance,
	 * so make sure it is not completely silent.
	 */
	pr_warn("%s: running CQE recovery\n", mmc_hostname(host));

	host->cqe_ops->cqe_recovery_start(host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	mmc_wait_for_cmd(host, &cmd, 0);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_CMDQ_TASK_MGMT;
	cmd.arg = 1; /* Discard entire queue */
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
	err = mmc_wait_for_cmd(host, &cmd, 0);

	host->cqe_ops->cqe_recovery_finish(host);

	mmc_retune_release(host);

	return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);

/**
 * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 * @host: MMC host
 * @mrq: MMC request
 *
 * mmc_is_req_done() is used with requests that have
 * mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 * starting a request and before waiting for it to complete. That is,
 * either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 * and before mmc_wait_for_req_done(). If it is called at other times the
 * result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
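
/*
 * Usage sketch (an editor's illustration, not from the original source): a
 * caller that sets mrq->cap_cmd_during_tfr = true can issue the request with
 * mmc_wait_for_req(), poll mmc_is_req_done() while sending other non-data
 * commands, and finally call mmc_wait_for_req_done():
 *
 *	mrq.cap_cmd_during_tfr = true;
 *	mmc_wait_for_req(host, &mrq);        // returns once the command is done
 *	while (!mmc_is_req_done(host, &mrq))
 *		;                            // e.g. issue status polling here
 *	mmc_wait_for_req_done(host, &mrq);   // wait for the data transfer
 */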

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. In the case of 'cap_cmd_during_tfr'
 * requests, the transfer is ongoing and the caller can issue further
 * commands that do not use the data lines, and then wait by calling
 * mmc_wait_for_req_done().
 * Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length." Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require a longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
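
/*
 * Worked example (editor's sketch, with illustrative CSD values): for an SD
 * card read with taac_ns = 1000000 (1 ms) and taac_clks = 0, the base
 * multiplier is 100, so timeout_ns = 100 ms, which is within the 100 ms read
 * limit. For a write on the same card with r2w_factor = 2, the multiplier
 * becomes 100 << 2 = 400, giving 400 ms, within the 3 s write limit.
 */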

/*
 * Allow claiming an already claimed host if the context is the same or there is
 * no context but the task is the same.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
	       (!ctx && task && host->claimer->task == task);
}

static inline void mmc_ctx_set_claimer(struct mmc_host *host,
				       struct mmc_ctx *ctx,
				       struct task_struct *task)
{
	if (!host->claimer) {
		if (ctx)
			host->claimer = ctx;
		else
			host->claimer = &host->default_ctx;
	}
	if (task)
		host->claimer->task = task;
}

/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @ctx: context that claims the host or NULL in which case the default
 * context will be used
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non-NULL and
 * dereferences to a non-zero value, this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
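
/*
 * Usage sketch (an editor's illustration): callers typically bracket a series
 * of commands with a claim/release pair so no other context can drive the bus
 * in between, e.g.:
 *
 *	mmc_claim_host(host);	// wrapper around __mmc_claim_host()
 *	err = mmc_wait_for_cmd(host, &cmd, 3);
 *	mmc_release_host(host);
 */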

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release an MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		if (host->caps & MMC_CAP_SYNC_RUNTIME_PM)
			pm_runtime_put_sync_suspend(mmc_dev(host));
		else
			pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
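
/*
 * Usage sketch (an editor's illustration): paths that talk to a live card
 * pair these helpers around the access, keeping the card runtime resumed
 * for exactly its duration:
 *
 *	mmc_get_card(card, NULL);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_put_card(card, NULL);
 */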

/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		 "width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is at or below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
EXPORT_SYMBOL_GPL(mmc_set_clock);

int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err) {
		pr_err("%s: tuning execution failed: %d\n",
		       mmc_hostname(host), err);
	} else {
		host->retune_now = 0;
		host->need_retune = 0;
		mmc_retune_enable(host);
	}

	return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	    host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
	mmc_crypto_set_initial_state(host);
}
EXPORT_SYMBOL_GPL(mmc_set_initial_state);

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
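
/*
 * Worked example (editor's note): with @low_bits = false, 3300 mV gives
 * bit = (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34). With
 * @low_bits = true, vdd is first decremented to 3299, so the integer
 * division yields bit 20, i.e. ilog2(MMC_VDD_32_33).
 */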

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
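
/*
 * Worked example (editor's note): mmc_vddrange_to_ocrmask(3300, 3400) maps
 * vdd_min to bit 20 and vdd_max to bit 22, so the loop fills bits 22..20 and
 * returns MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 (0x00700000).
 */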

static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
					     unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
			 "card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}
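
/*
 * Example (editor's note): if card and host share OCR bits 20 and 21
 * (roughly 3.2-3.4 V), a host with MMC_CAP2_FULL_PWR_CYCLE keeps the lowest
 * supported window via ffs() and power cycles at that voltage; otherwise the
 * highest window is kept via fls(), avoiding a voltage change that the
 * current power-on state may not permit.
 */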

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}

void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}

int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
	u32 clock;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	return 0;
}

int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto power_cycle;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}

	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			 "power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}

/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
EXPORT_SYMBOL_GPL(mmc_set_timing);

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}

int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	mmc_set_initial_signal_voltage(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(host->ios.power_delay_ms);
}

void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}

void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
	/*
	 * Prevent system sleep for 5s to allow user space to consume the
	 * corresponding uevent. This is especially useful, when CD irq is used
	 * as a system wakeup, but doesn't hurt in other cases.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
		__pm_wakeup_event(host->ws, 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on an MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card. That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
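
/*
 * Worked example (editor's note): sz above is the card size in MiB, since
 * csd.capacity is shifted to 512-byte sectors and then right by 11 to divide
 * by the 2048 sectors per MiB. An 8 GiB MMC (sz = 8192) therefore gets
 * pref_erase = 4 MiB / 512 = 8192 sectors, rounded up, if needed, to a
 * multiple of erase_size.
 */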

static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.taac_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
		if (card->csd.taac_ns < 1000000)
			timeout_us = (card->csd.taac_ns * mult) / 1000;
		else
			timeout_us = (card->csd.taac_ns / 1000) * mult;

		/*
		 * ios.clock is only a target. The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
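
/*
 * Worked example (editor's sketch, illustrative values): in the CSD branch
 * with taac_ns = 80000000, taac_clks = 0 and r2w_factor = 7, mult is
 * 10 << 7 = 1280; since taac_ns >= 1000000 the second formula applies, so
 * timeout_us = (80000000 / 1000) * 1280 = 102400000, i.e. an erase_timeout
 * of 102400 ms per erase group before the qty multiplier.
 */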

static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	/*
	 * For DISCARD none of the calculation below applies: the busy
	 * timeout is 250 ms per discard command.
	 */
	if (arg == SD_DISCARD_ARG)
		return SD_DISCARD_TIMEOUT_MS;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
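
/*
 * Example of the SSR path (illustrative figures): with
 * ssr.erase_timeout = 250 ms per AU, ssr.erase_offset = 50 ms and
 * qty = 3, the raw timeout is 250 * 3 + 50 = 800 ms, which the final
 * clamp then raises to the 1000 ms minimum.
 */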

static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but the
	 * JEDEC standard does not seem to insist on that, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout for
	 * the erase operation does not exceed the max_busy_timeout, we should
	 * use R1B response. Otherwise we need to prevent the host from doing
	 * hw busy detection, which is done by converting to an R1 response
	 * instead. Note that some hosts require R1B, which also means they
	 * are on their own when it comes to dealing with the busy timeout.
	 */
	if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
	    card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, polling must be
	 * avoided: the host is already waiting out the busy period itself.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	/* Let's poll to find out when the erase operation completes. */
	err = mmc_poll_for_busy(card, busy_timeout, MMC_BUSY_ERASE);

out:
	mmc_retune_release(card->host);
	return err;
}
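
/*
 * Example of the qty computation (illustrative figures): with
 * card->erase_shift = 13 (8192-sector, i.e. 4 MiB, erase groups),
 * from = 0 and to = 10000 give qty = (10000 >> 13) - (0 >> 13) + 1 = 2,
 * i.e. a range that only partially covers its last group is still
 * billed for the whole group when the timeout is computed.
 */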

static unsigned int mmc_align_erase_size(struct mmc_card *card,
					 unsigned int *from,
					 unsigned int *to,
					 unsigned int nr)
{
	unsigned int from_new = *from, nr_new = nr, rem;

	/*
	 * When 'card->erase_size' is a power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
		unsigned int temp = from_new;

		from_new = round_up(temp, card->erase_size);
		rem = from_new - temp;

		if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;

		nr_new = round_down(nr_new, card->erase_size);
	} else {
		rem = from_new % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
		}

		rem = nr_new % card->erase_size;
		if (rem)
			nr_new -= rem;
	}

	if (nr_new == 0)
		return 0;

	*to = from_new + nr_new;
	*from = from_new;

	return nr_new;
}
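
/*
 * Worked example (illustrative figures): with erase_size = 1024
 * sectors (a power of 2), *from = 1000 and nr = 5000, from is rounded
 * up to 1024 (rem = 24, nr_new = 4976), nr_new is rounded down to
 * 4096, *to becomes 5120 and 4096 is returned; the 24 leading and 880
 * trailing sectors that cannot form a whole erase group are dropped.
 */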

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG)
		return -EOPNOTSUPP;

	if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * if the region crosses an erase-group boundary in this case, we end
	 * up trimming more than one erase-group, which does not fit in the
	 * timeout budget of the controller, so we need to split it and call
	 * mmc_do_erase() twice if necessary. This special case is identified
	 * by the card->eg_boundary flag.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
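
/*
 * Minimal usage sketch for mmc_erase() (hypothetical caller, not part
 * of this file), illustrating the host-claim requirement documented
 * above:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */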

int mmc_can_erase(struct mmc_card *card)
{
	if (card->csd.cmdclass & CCC_ERASE && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not use 'host->max_busy_timeout' as the only limit when
	 * deciding the max discard sectors. We should aim for a balanced
	 * value that improves the erase speed without producing an overly
	 * long timeout.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size 'host->max_busy_timeout' is, but if
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we keep increasing the max discard sectors until we reach a
	 * balanced value. In cases where 'host->max_busy_timeout' isn't
	 * specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}
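
/*
 * Illustrative trace of the search above (assuming min_qty = 2 and a
 * budget that affords at most 5 erase groups): the first pass accepts
 * x = 1, 2, 4 and stops at x = 8, adding y = 4; the second pass
 * accepts x = 1 (qty = 5) and stops at x = 2; the third pass finds no
 * acceptable step, so the loop exits with qty = 5, which the code
 * above then decrements to 4 since a discard of that length may touch
 * qty + 1 erase groups.
 */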

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard || max_discard == 0)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}

	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout ?
		 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);

	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

bool mmc_card_is_blockaddr(struct mmc_card *card)
{
	return card ? mmc_card_blockaddr(card) : false;
}
EXPORT_SYMBOL(mmc_card_is_blockaddr);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
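
/*
 * Note on the early return in mmc_set_blocklen(): block-addressed
 * (high-capacity) cards use a fixed 512-byte block length, and the
 * JEDEC spec does not allow SET_BLOCKLEN in DDR52/HS400 modes, so
 * CMD16 is skipped in all of those cases.
 */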

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	mmc_pwrseq_reset(host);

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

/**
 * mmc_hw_reset - reset the card in hardware
 * @host: MMC host to which the card is attached
 *
 * Hard reset the card. This function is only for upper layers, like the
 * block layer or card drivers. You cannot use it in host drivers (struct
 * mmc_card might be gone then).
 *
 * Return: 0 on success, -errno on failure
 */
int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->hw_reset(host);
	mmc_bus_put(host);

	if (ret < 0)
		pr_warn("%s: tried to HW reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_sw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->sw_reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to SW reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_sw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

	pr_debug("%s: %s: trying to init card at %u Hz\n",
		 mmc_hostname(host), __func__, host->f_init);

	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset() sends CMD52 to reset the card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52 should
	 * be ignored by SD/eMMC cards. Skip it if we already know that we
	 * do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}
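
/*
 * mmc_rescan() below retries this initialization at each rate in the
 * freqs[] table (defined earlier in this file; 400 kHz down to 100 kHz
 * in mainline), clamped to the host's [f_min, f_max] range, until one
 * attempt succeeds.
 */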

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/* Verify a registered card to be functional, else remove it. */
	if (host->bus_ops && !host->bus_dead)
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		unsigned int freq = freqs[i];

		if (freq > host->f_max) {
			if (i + 1 < ARRAY_SIZE(freqs))
				continue;
			freq = host->f_max;
		}

		if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(min(freqs[0], host->f_max), host->f_min);
	host->rescan_disable = 0;

	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void __mmc_stop_host(struct mmc_host *host)
{
	if (host->slot.cd_irq >= 0) {
		mmc_gpio_set_cd_wake(host, false);
		disable_irq(host->slot.cd_irq);
	}

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
}

void mmc_stop_host(struct mmc_host *host)
{
	__mmc_stop_host(host);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");