  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2008, Freescale Semiconductor, Inc
  4. * Andy Fleming
  5. *
  6. * Based vaguely on the Linux code
  7. */
  8. #include <config.h>
  9. #include <common.h>
  10. #include <blk.h>
  11. #include <command.h>
  12. #include <dm.h>
  13. #include <log.h>
  14. #include <dm/device-internal.h>
  15. #include <errno.h>
  16. #include <mmc.h>
  17. #include <part.h>
  18. #include <linux/bitops.h>
  19. #include <linux/delay.h>
  20. #include <power/regulator.h>
  21. #include <malloc.h>
  22. #include <memalign.h>
  23. #include <linux/list.h>
  24. #include <div64.h>
  25. #include "mmc_private.h"
  26. #define DEFAULT_CMD6_TIMEOUT_MS 500
  27. static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
  28. #if !CONFIG_IS_ENABLED(DM_MMC)
  29. static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
  30. {
  31. return -ENOSYS;
  32. }
  33. __weak int board_mmc_getwp(struct mmc *mmc)
  34. {
  35. return -1;
  36. }
  37. int mmc_getwp(struct mmc *mmc)
  38. {
  39. int wp;
  40. wp = board_mmc_getwp(mmc);
  41. if (wp < 0) {
  42. if (mmc->cfg->ops->getwp)
  43. wp = mmc->cfg->ops->getwp(mmc);
  44. else
  45. wp = 0;
  46. }
  47. return wp;
  48. }
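/*
 * Illustrative sketch (not part of the original file): a board can override
 * the weak board_mmc_getwp() above to read a write-protect GPIO. The GPIO
 * number and helper used here are hypothetical.
 *
 *	int board_mmc_getwp(struct mmc *mmc)
 *	{
 *		return gpio_get_value(WP_GPIO) ? 1 : 0;
 *	}
 *
 * Returning a negative value falls back to the controller's getwp op, as
 * mmc_getwp() above shows.
 */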
  49. __weak int board_mmc_getcd(struct mmc *mmc)
  50. {
  51. return -1;
  52. }
  53. #endif
  54. #ifdef CONFIG_MMC_TRACE
  55. void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
  56. {
  57. printf("CMD_SEND:%d\n", cmd->cmdidx);
  58. printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
  59. }
  60. void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
  61. {
  62. int i;
  63. u8 *ptr;
  64. if (ret) {
  65. printf("\t\tRET\t\t\t %d\n", ret);
  66. } else {
  67. switch (cmd->resp_type) {
  68. case MMC_RSP_NONE:
  69. printf("\t\tMMC_RSP_NONE\n");
  70. break;
  71. case MMC_RSP_R1:
  72. printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
  73. cmd->response[0]);
  74. break;
  75. case MMC_RSP_R1b:
  76. printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
  77. cmd->response[0]);
  78. break;
  79. case MMC_RSP_R2:
  80. printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
  81. cmd->response[0]);
  82. printf("\t\t \t\t 0x%08x \n",
  83. cmd->response[1]);
  84. printf("\t\t \t\t 0x%08x \n",
  85. cmd->response[2]);
  86. printf("\t\t \t\t 0x%08x \n",
  87. cmd->response[3]);
  88. printf("\n");
  89. printf("\t\t\t\t\tDUMPING DATA\n");
  90. for (i = 0; i < 4; i++) {
  91. int j;
  92. printf("\t\t\t\t\t%03d - ", i*4);
  93. ptr = (u8 *)&cmd->response[i];
  94. ptr += 3;
  95. for (j = 0; j < 4; j++)
  96. printf("%02x ", *ptr--);
  97. printf("\n");
  98. }
  99. break;
  100. case MMC_RSP_R3:
  101. printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
  102. cmd->response[0]);
  103. break;
  104. default:
  105. printf("\t\tERROR MMC rsp not supported\n");
  106. break;
  107. }
  108. }
  109. }
  110. void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
  111. {
  112. int status;
  113. status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
  114. printf("CURR STATE:%d\n", status);
  115. }
  116. #endif
  117. #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
  118. const char *mmc_mode_name(enum bus_mode mode)
  119. {
  120. static const char *const names[] = {
  121. [MMC_LEGACY] = "MMC legacy",
  122. [MMC_HS] = "MMC High Speed (26MHz)",
  123. [SD_HS] = "SD High Speed (50MHz)",
  124. [UHS_SDR12] = "UHS SDR12 (25MHz)",
  125. [UHS_SDR25] = "UHS SDR25 (50MHz)",
  126. [UHS_SDR50] = "UHS SDR50 (100MHz)",
  127. [UHS_SDR104] = "UHS SDR104 (208MHz)",
  128. [UHS_DDR50] = "UHS DDR50 (50MHz)",
  129. [MMC_HS_52] = "MMC High Speed (52MHz)",
  130. [MMC_DDR_52] = "MMC DDR52 (52MHz)",
  131. [MMC_HS_200] = "HS200 (200MHz)",
  132. [MMC_HS_400] = "HS400 (200MHz)",
  133. [MMC_HS_400_ES] = "HS400ES (200MHz)",
  134. };
  135. if (mode >= MMC_MODES_END)
  136. return "Unknown mode";
  137. else
  138. return names[mode];
  139. }
  140. #endif
  141. static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
  142. {
  143. static const int freqs[] = {
  144. [MMC_LEGACY] = 25000000,
  145. [MMC_HS] = 26000000,
  146. [SD_HS] = 50000000,
  147. [MMC_HS_52] = 52000000,
  148. [MMC_DDR_52] = 52000000,
  149. [UHS_SDR12] = 25000000,
  150. [UHS_SDR25] = 50000000,
  151. [UHS_SDR50] = 100000000,
  152. [UHS_DDR50] = 50000000,
  153. [UHS_SDR104] = 208000000,
  154. [MMC_HS_200] = 200000000,
  155. [MMC_HS_400] = 200000000,
  156. [MMC_HS_400_ES] = 200000000,
  157. };
  158. if (mode == MMC_LEGACY)
  159. return mmc->legacy_speed;
  160. else if (mode >= MMC_MODES_END)
  161. return 0;
  162. else
  163. return freqs[mode];
  164. }
  165. static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
  166. {
  167. mmc->selected_mode = mode;
  168. mmc->tran_speed = mmc_mode2freq(mmc, mode);
  169. mmc->ddr_mode = mmc_is_mode_ddr(mode);
  170. pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
  171. mmc->tran_speed / 1000000);
  172. return 0;
  173. }
  174. #if !CONFIG_IS_ENABLED(DM_MMC)
  175. int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
  176. {
  177. int ret;
  178. mmmc_trace_before_send(mmc, cmd);
  179. ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
  180. mmmc_trace_after_send(mmc, cmd, ret);
  181. return ret;
  182. }
  183. #endif
  184. int mmc_send_status(struct mmc *mmc, unsigned int *status)
  185. {
  186. struct mmc_cmd cmd;
  187. int err, retries = 5;
  188. cmd.cmdidx = MMC_CMD_SEND_STATUS;
  189. cmd.resp_type = MMC_RSP_R1;
  190. if (!mmc_host_is_spi(mmc))
  191. cmd.cmdarg = mmc->rca << 16;
  192. while (retries--) {
  193. err = mmc_send_cmd(mmc, &cmd, NULL);
  194. if (!err) {
  195. mmc_trace_state(mmc, &cmd);
  196. *status = cmd.response[0];
  197. return 0;
  198. }
  199. }
  200. mmc_trace_state(mmc, &cmd);
  201. return -ECOMM;
  202. }
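/*
 * Illustrative sketch: reading the card status and extracting the current
 * state field, exactly as mmc_trace_state() does. 'mmc' is assumed to be an
 * already-initialised device.
 *
 *	unsigned int status;
 *
 *	if (!mmc_send_status(mmc, &status))
 *		printf("state %u\n", (status & MMC_STATUS_CURR_STATE) >> 9);
 */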
  203. int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
  204. {
  205. unsigned int status;
  206. int err;
  207. err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
  208. if (err != -ENOSYS)
  209. return err;
  210. while (1) {
  211. err = mmc_send_status(mmc, &status);
  212. if (err)
  213. return err;
  214. if ((status & MMC_STATUS_RDY_FOR_DATA) &&
  215. (status & MMC_STATUS_CURR_STATE) !=
  216. MMC_STATE_PRG)
  217. break;
  218. if (status & MMC_STATUS_MASK) {
  219. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  220. pr_err("Status Error: 0x%08x\n", status);
  221. #endif
  222. return -ECOMM;
  223. }
  224. if (timeout_ms-- <= 0)
  225. break;
  226. udelay(1000);
  227. }
  228. if (timeout_ms <= 0) {
  229. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  230. pr_err("Timeout waiting card ready\n");
  231. #endif
  232. return -ETIMEDOUT;
  233. }
  234. return 0;
  235. }
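/*
 * Illustrative sketch: waiting for the card to leave the programming state
 * after a write or switch command, using a one second timeout:
 *
 *	err = mmc_poll_for_busy(mmc, 1000);
 *	if (err)
 *		return err;
 */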
  236. int mmc_set_blocklen(struct mmc *mmc, int len)
  237. {
  238. struct mmc_cmd cmd;
  239. int err;
  240. if (mmc->ddr_mode)
  241. return 0;
  242. cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
  243. cmd.resp_type = MMC_RSP_R1;
  244. cmd.cmdarg = len;
  245. err = mmc_send_cmd(mmc, &cmd, NULL);
  246. #ifdef CONFIG_MMC_QUIRKS
  247. if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
  248. int retries = 4;
		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, so let's retry a few more times.
		 */
  253. do {
  254. err = mmc_send_cmd(mmc, &cmd, NULL);
  255. if (!err)
  256. break;
  257. } while (retries--);
  258. }
  259. #endif
  260. return err;
  261. }
  262. #ifdef MMC_SUPPORTS_TUNING
  263. static const u8 tuning_blk_pattern_4bit[] = {
  264. 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  265. 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  266. 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  267. 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  268. 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  269. 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  270. 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  271. 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  272. };
  273. static const u8 tuning_blk_pattern_8bit[] = {
  274. 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  275. 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  276. 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  277. 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
  278. 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
  279. 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
  280. 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
  281. 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
  282. 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
  283. 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
  284. 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
  285. 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
  286. 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
  287. 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
  288. 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
  289. 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
  290. };
  291. int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
  292. {
  293. struct mmc_cmd cmd;
  294. struct mmc_data data;
  295. const u8 *tuning_block_pattern;
  296. int size, err;
  297. if (mmc->bus_width == 8) {
  298. tuning_block_pattern = tuning_blk_pattern_8bit;
  299. size = sizeof(tuning_blk_pattern_8bit);
  300. } else if (mmc->bus_width == 4) {
  301. tuning_block_pattern = tuning_blk_pattern_4bit;
  302. size = sizeof(tuning_blk_pattern_4bit);
  303. } else {
  304. return -EINVAL;
  305. }
  306. ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
  307. cmd.cmdidx = opcode;
  308. cmd.cmdarg = 0;
  309. cmd.resp_type = MMC_RSP_R1;
  310. data.dest = (void *)data_buf;
  311. data.blocks = 1;
  312. data.blocksize = size;
  313. data.flags = MMC_DATA_READ;
  314. err = mmc_send_cmd(mmc, &cmd, &data);
  315. if (err)
  316. return err;
  317. if (memcmp(data_buf, tuning_block_pattern, size))
  318. return -EIO;
  319. return 0;
  320. }
  321. #endif
  322. static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
  323. lbaint_t blkcnt)
  324. {
  325. struct mmc_cmd cmd;
  326. struct mmc_data data;
  327. if (blkcnt > 1)
  328. cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
  329. else
  330. cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
  331. if (mmc->high_capacity)
  332. cmd.cmdarg = start;
  333. else
  334. cmd.cmdarg = start * mmc->read_bl_len;
  335. cmd.resp_type = MMC_RSP_R1;
  336. data.dest = dst;
  337. data.blocks = blkcnt;
  338. data.blocksize = mmc->read_bl_len;
  339. data.flags = MMC_DATA_READ;
  340. if (mmc_send_cmd(mmc, &cmd, &data))
  341. return 0;
  342. if (blkcnt > 1) {
  343. cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
  344. cmd.cmdarg = 0;
  345. cmd.resp_type = MMC_RSP_R1b;
  346. if (mmc_send_cmd(mmc, &cmd, NULL)) {
  347. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  348. pr_err("mmc fail to send stop cmd\n");
  349. #endif
  350. return 0;
  351. }
  352. }
  353. return blkcnt;
  354. }
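/*
 * Illustrative note on the addressing rule used above: standard-capacity
 * cards take a byte address, high-capacity cards a block address. With a
 * 512-byte read_bl_len, reading block 100 therefore uses:
 *
 *	cmd.cmdarg = mmc->high_capacity ? 100 : 100 * 512;
 */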
  355. #if !CONFIG_IS_ENABLED(DM_MMC)
  356. static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
  357. {
  358. if (mmc->cfg->ops->get_b_max)
  359. return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
  360. else
  361. return mmc->cfg->b_max;
  362. }
  363. #endif
  364. #if CONFIG_IS_ENABLED(BLK)
  365. ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
  366. #else
  367. ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
  368. void *dst)
  369. #endif
  370. {
  371. #if CONFIG_IS_ENABLED(BLK)
  372. struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
  373. #endif
  374. int dev_num = block_dev->devnum;
  375. int err;
  376. lbaint_t cur, blocks_todo = blkcnt;
  377. uint b_max;
  378. if (blkcnt == 0)
  379. return 0;
  380. struct mmc *mmc = find_mmc_device(dev_num);
  381. if (!mmc)
  382. return 0;
  383. if (CONFIG_IS_ENABLED(MMC_TINY))
  384. err = mmc_switch_part(mmc, block_dev->hwpart);
  385. else
  386. err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
  387. if (err < 0)
  388. return 0;
  389. if ((start + blkcnt) > block_dev->lba) {
  390. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  391. pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
  392. start + blkcnt, block_dev->lba);
  393. #endif
  394. return 0;
  395. }
  396. if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
  397. pr_debug("%s: Failed to set blocklen\n", __func__);
  398. return 0;
  399. }
  400. b_max = mmc_get_b_max(mmc, dst, blkcnt);
  401. do {
  402. cur = (blocks_todo > b_max) ? b_max : blocks_todo;
  403. if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
  404. pr_debug("%s: Failed to read blocks\n", __func__);
  405. return 0;
  406. }
  407. blocks_todo -= cur;
  408. start += cur;
  409. dst += cur * mmc->read_bl_len;
  410. } while (blocks_todo > 0);
  411. return blkcnt;
  412. }
  413. static int mmc_go_idle(struct mmc *mmc)
  414. {
  415. struct mmc_cmd cmd;
  416. int err;
  417. udelay(1000);
  418. cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
  419. cmd.cmdarg = 0;
  420. cmd.resp_type = MMC_RSP_NONE;
  421. err = mmc_send_cmd(mmc, &cmd, NULL);
  422. if (err)
  423. return err;
  424. udelay(2000);
  425. return 0;
  426. }
  427. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  428. static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
  429. {
  430. struct mmc_cmd cmd;
  431. int err = 0;
  432. /*
  433. * Send CMD11 only if the request is to switch the card to
  434. * 1.8V signalling.
  435. */
  436. if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
  437. return mmc_set_signal_voltage(mmc, signal_voltage);
  438. cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
  439. cmd.cmdarg = 0;
  440. cmd.resp_type = MMC_RSP_R1;
  441. err = mmc_send_cmd(mmc, &cmd, NULL);
  442. if (err)
  443. return err;
  444. if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
  445. return -EIO;
  446. /*
  447. * The card should drive cmd and dat[0:3] low immediately
  448. * after the response of cmd11, but wait 100 us to be sure
  449. */
  450. err = mmc_wait_dat0(mmc, 0, 100);
  451. if (err == -ENOSYS)
  452. udelay(100);
  453. else if (err)
  454. return -ETIMEDOUT;
  455. /*
  456. * During a signal voltage level switch, the clock must be gated
  457. * for 5 ms according to the SD spec
  458. */
  459. mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
  460. err = mmc_set_signal_voltage(mmc, signal_voltage);
  461. if (err)
  462. return err;
  463. /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
  464. mdelay(10);
  465. mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
  466. /*
  467. * Failure to switch is indicated by the card holding
  468. * dat[0:3] low. Wait for at least 1 ms according to spec
  469. */
  470. err = mmc_wait_dat0(mmc, 1, 1000);
  471. if (err == -ENOSYS)
  472. udelay(1000);
  473. else if (err)
  474. return -ETIMEDOUT;
  475. return 0;
  476. }
  477. #endif
  478. static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
  479. {
  480. int timeout = 1000;
  481. int err;
  482. struct mmc_cmd cmd;
  483. while (1) {
  484. cmd.cmdidx = MMC_CMD_APP_CMD;
  485. cmd.resp_type = MMC_RSP_R1;
  486. cmd.cmdarg = 0;
  487. err = mmc_send_cmd(mmc, &cmd, NULL);
  488. if (err)
  489. return err;
  490. cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
  491. cmd.resp_type = MMC_RSP_R3;
		/*
		 * Most cards do not answer if some reserved bits
		 * in the OCR are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
  499. cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
  500. (mmc->cfg->voltages & 0xff8000);
  501. if (mmc->version == SD_VERSION_2)
  502. cmd.cmdarg |= OCR_HCS;
  503. if (uhs_en)
  504. cmd.cmdarg |= OCR_S18R;
  505. err = mmc_send_cmd(mmc, &cmd, NULL);
  506. if (err)
  507. return err;
  508. if (cmd.response[0] & OCR_BUSY)
  509. break;
  510. if (timeout-- <= 0)
  511. return -EOPNOTSUPP;
  512. udelay(1000);
  513. }
  514. if (mmc->version != SD_VERSION_2)
  515. mmc->version = SD_VERSION_1_0;
  516. if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
  517. cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
  518. cmd.resp_type = MMC_RSP_R3;
  519. cmd.cmdarg = 0;
  520. err = mmc_send_cmd(mmc, &cmd, NULL);
  521. if (err)
  522. return err;
  523. }
  524. mmc->ocr = cmd.response[0];
  525. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  526. if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
  527. == 0x41000000) {
  528. err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
  529. if (err)
  530. return err;
  531. }
  532. #endif
  533. mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
  534. mmc->rca = 0;
  535. return 0;
  536. }
  537. static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
  538. {
  539. struct mmc_cmd cmd;
  540. int err;
  541. cmd.cmdidx = MMC_CMD_SEND_OP_COND;
  542. cmd.resp_type = MMC_RSP_R3;
  543. cmd.cmdarg = 0;
  544. if (use_arg && !mmc_host_is_spi(mmc))
  545. cmd.cmdarg = OCR_HCS |
  546. (mmc->cfg->voltages &
  547. (mmc->ocr & OCR_VOLTAGE_MASK)) |
  548. (mmc->ocr & OCR_ACCESS_MODE);
  549. err = mmc_send_cmd(mmc, &cmd, NULL);
  550. if (err)
  551. return err;
  552. mmc->ocr = cmd.response[0];
  553. return 0;
  554. }
  555. static int mmc_send_op_cond(struct mmc *mmc)
  556. {
  557. int err, i;
  558. int timeout = 1000;
  559. uint start;
  560. /* Some cards seem to need this */
  561. mmc_go_idle(mmc);
  562. start = get_timer(0);
	/* Ask the card for its capabilities */
  564. for (i = 0; ; i++) {
  565. err = mmc_send_op_cond_iter(mmc, i != 0);
  566. if (err)
  567. return err;
  568. /* exit if not busy (flag seems to be inverted) */
  569. if (mmc->ocr & OCR_BUSY)
  570. break;
  571. if (get_timer(start) > timeout)
  572. return -ETIMEDOUT;
  573. udelay(100);
  574. }
  575. mmc->op_cond_pending = 1;
  576. return 0;
  577. }
  578. static int mmc_complete_op_cond(struct mmc *mmc)
  579. {
  580. struct mmc_cmd cmd;
  581. int timeout = 1000;
  582. ulong start;
  583. int err;
  584. mmc->op_cond_pending = 0;
  585. if (!(mmc->ocr & OCR_BUSY)) {
  586. /* Some cards seem to need this */
  587. mmc_go_idle(mmc);
  588. start = get_timer(0);
  589. while (1) {
  590. err = mmc_send_op_cond_iter(mmc, 1);
  591. if (err)
  592. return err;
  593. if (mmc->ocr & OCR_BUSY)
  594. break;
  595. if (get_timer(start) > timeout)
  596. return -EOPNOTSUPP;
  597. udelay(100);
  598. }
  599. }
  600. if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
  601. cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
  602. cmd.resp_type = MMC_RSP_R3;
  603. cmd.cmdarg = 0;
  604. err = mmc_send_cmd(mmc, &cmd, NULL);
  605. if (err)
  606. return err;
  607. mmc->ocr = cmd.response[0];
  608. }
  609. mmc->version = MMC_VERSION_UNKNOWN;
  610. mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
  611. mmc->rca = 1;
  612. return 0;
  613. }
  614. int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
  615. {
  616. struct mmc_cmd cmd;
  617. struct mmc_data data;
  618. int err;
	/* Get the card's Extended CSD register */
  620. cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
  621. cmd.resp_type = MMC_RSP_R1;
  622. cmd.cmdarg = 0;
  623. data.dest = (char *)ext_csd;
  624. data.blocks = 1;
  625. data.blocksize = MMC_MAX_BLOCK_LEN;
  626. data.flags = MMC_DATA_READ;
  627. err = mmc_send_cmd(mmc, &cmd, &data);
  628. return err;
  629. }
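/*
 * Illustrative sketch: the EXT_CSD is a single MMC_MAX_BLOCK_LEN sized
 * block, so callers allocate a cache-aligned buffer for it, as done
 * elsewhere in this file:
 *
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
 *
 *	err = mmc_send_ext_csd(mmc, ext_csd);
 */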
  630. static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
  631. bool send_status)
  632. {
  633. unsigned int status, start;
  634. struct mmc_cmd cmd;
  635. int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
  636. bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
  637. (index == EXT_CSD_PART_CONF);
  638. int retries = 3;
  639. int ret;
  640. if (mmc->gen_cmd6_time)
  641. timeout_ms = mmc->gen_cmd6_time * 10;
  642. if (is_part_switch && mmc->part_switch_time)
  643. timeout_ms = mmc->part_switch_time * 10;
  644. cmd.cmdidx = MMC_CMD_SWITCH;
  645. cmd.resp_type = MMC_RSP_R1b;
  646. cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
  647. (index << 16) |
  648. (value << 8);
  649. do {
  650. ret = mmc_send_cmd(mmc, &cmd, NULL);
  651. } while (ret && retries-- > 0);
  652. if (ret)
  653. return ret;
  654. start = get_timer(0);
	/* Poll dat0 for ready/busy status */
  656. ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
  657. if (ret && ret != -ENOSYS)
  658. return ret;
	/*
	 * If polling with CMD13 is not allowed, or we cannot poll by using
	 * mmc_wait_dat0, rely on waiting the stated timeout to be sufficient.
	 */
  664. if (ret == -ENOSYS && !send_status)
  665. mdelay(timeout_ms);
  666. /* Finally wait until the card is ready or indicates a failure
  667. * to switch. It doesn't hurt to use CMD13 here even if send_status
  668. * is false, because by now (after 'timeout_ms' ms) the bus should be
  669. * reliable.
  670. */
  671. do {
  672. ret = mmc_send_status(mmc, &status);
  673. if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
  674. pr_debug("switch failed %d/%d/0x%x !\n", set, index,
  675. value);
  676. return -EIO;
  677. }
  678. if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
  679. return 0;
  680. udelay(100);
  681. } while (get_timer(start) < timeout_ms);
  682. return -ETIMEDOUT;
  683. }
  684. int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
  685. {
  686. return __mmc_switch(mmc, set, index, value, true);
  687. }
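/*
 * Illustrative sketch: mmc_switch() updates a single EXT_CSD byte, e.g.
 * selecting high-speed timing as mmc_set_card_speed() does below:
 *
 *	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
 *			 EXT_CSD_TIMING_HS);
 */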
  688. int mmc_boot_wp(struct mmc *mmc)
  689. {
  690. return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
  691. }
  692. #if !CONFIG_IS_ENABLED(MMC_TINY)
  693. static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
  694. bool hsdowngrade)
  695. {
  696. int err;
  697. int speed_bits;
  698. ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
  699. switch (mode) {
  700. case MMC_HS:
  701. case MMC_HS_52:
  702. case MMC_DDR_52:
  703. speed_bits = EXT_CSD_TIMING_HS;
  704. break;
  705. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
  706. case MMC_HS_200:
  707. speed_bits = EXT_CSD_TIMING_HS200;
  708. break;
  709. #endif
  710. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
  711. case MMC_HS_400:
  712. speed_bits = EXT_CSD_TIMING_HS400;
  713. break;
  714. #endif
  715. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  716. case MMC_HS_400_ES:
  717. speed_bits = EXT_CSD_TIMING_HS400;
  718. break;
  719. #endif
  720. case MMC_LEGACY:
  721. speed_bits = EXT_CSD_TIMING_LEGACY;
  722. break;
  723. default:
  724. return -EINVAL;
  725. }
  726. err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
  727. speed_bits, !hsdowngrade);
  728. if (err)
  729. return err;
  730. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
  731. CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock is still running much faster than
	 * the supported HS mode clock, so we cannot reliably read out the
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
  738. if (hsdowngrade) {
  739. mmc_select_mode(mmc, MMC_HS);
  740. mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
  741. }
  742. #endif
  743. if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
  744. /* Now check to see that it worked */
  745. err = mmc_send_ext_csd(mmc, test_csd);
  746. if (err)
  747. return err;
  748. /* No high-speed support */
  749. if (!test_csd[EXT_CSD_HS_TIMING])
  750. return -ENOTSUPP;
  751. }
  752. return 0;
  753. }
  754. static int mmc_get_capabilities(struct mmc *mmc)
  755. {
  756. u8 *ext_csd = mmc->ext_csd;
  757. char cardtype;
  758. mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
  759. if (mmc_host_is_spi(mmc))
  760. return 0;
  761. /* Only version 4 supports high-speed */
  762. if (mmc->version < MMC_VERSION_4)
  763. return 0;
  764. if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
  766. return -ENOTSUPP;
  767. }
  768. mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
  769. cardtype = ext_csd[EXT_CSD_CARD_TYPE];
  770. mmc->cardtype = cardtype;
  771. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
  772. if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
  773. EXT_CSD_CARD_TYPE_HS200_1_8V)) {
  774. mmc->card_caps |= MMC_MODE_HS200;
  775. }
  776. #endif
  777. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
  778. CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  779. if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
  780. EXT_CSD_CARD_TYPE_HS400_1_8V)) {
  781. mmc->card_caps |= MMC_MODE_HS400;
  782. }
  783. #endif
  784. if (cardtype & EXT_CSD_CARD_TYPE_52) {
  785. if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
  786. mmc->card_caps |= MMC_MODE_DDR_52MHz;
  787. mmc->card_caps |= MMC_MODE_HS_52MHz;
  788. }
  789. if (cardtype & EXT_CSD_CARD_TYPE_26)
  790. mmc->card_caps |= MMC_MODE_HS;
  791. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  792. if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
  793. (mmc->card_caps & MMC_MODE_HS400)) {
  794. mmc->card_caps |= MMC_MODE_HS400_ES;
  795. }
  796. #endif
  797. return 0;
  798. }
  799. #endif
  800. static int mmc_set_capacity(struct mmc *mmc, int part_num)
  801. {
  802. switch (part_num) {
  803. case 0:
  804. mmc->capacity = mmc->capacity_user;
  805. break;
  806. case 1:
  807. case 2:
  808. mmc->capacity = mmc->capacity_boot;
  809. break;
  810. case 3:
  811. mmc->capacity = mmc->capacity_rpmb;
  812. break;
  813. case 4:
  814. case 5:
  815. case 6:
  816. case 7:
  817. mmc->capacity = mmc->capacity_gp[part_num - 4];
  818. break;
  819. default:
  820. return -1;
  821. }
  822. mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
  823. return 0;
  824. }
  825. int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
  826. {
  827. int ret;
  828. int retry = 3;
  829. do {
  830. ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  831. EXT_CSD_PART_CONF,
  832. (mmc->part_config & ~PART_ACCESS_MASK)
  833. | (part_num & PART_ACCESS_MASK));
  834. } while (ret && retry--);
  835. /*
  836. * Set the capacity if the switch succeeded or was intended
  837. * to return to representing the raw device.
  838. */
  839. if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
  840. ret = mmc_set_capacity(mmc, part_num);
  841. mmc_get_blk_desc(mmc)->hwpart = part_num;
  842. }
  843. return ret;
  844. }
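/*
 * Illustrative sketch: hardware partition numbers follow the encoding used
 * in mmc_set_capacity() above (0 = user area, 1/2 = boot partitions,
 * 3 = RPMB, 4-7 = general purpose). Selecting boot partition 1:
 *
 *	err = mmc_switch_part(mmc, 1);
 */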
  845. #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
  846. int mmc_hwpart_config(struct mmc *mmc,
  847. const struct mmc_hwpart_conf *conf,
  848. enum mmc_hwpart_conf_mode mode)
  849. {
  850. u8 part_attrs = 0;
  851. u32 enh_size_mult;
  852. u32 enh_start_addr;
  853. u32 gp_size_mult[4];
  854. u32 max_enh_size_mult;
  855. u32 tot_enh_size_mult = 0;
  856. u8 wr_rel_set;
  857. int i, pidx, err;
  858. ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
  859. if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
  860. return -EINVAL;
  861. if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
  862. pr_err("eMMC >= 4.4 required for enhanced user data area\n");
  863. return -EMEDIUMTYPE;
  864. }
  865. if (!(mmc->part_support & PART_SUPPORT)) {
  866. pr_err("Card does not support partitioning\n");
  867. return -EMEDIUMTYPE;
  868. }
  869. if (!mmc->hc_wp_grp_size) {
  870. pr_err("Card does not define HC WP group size\n");
  871. return -EMEDIUMTYPE;
  872. }
  873. /* check partition alignment and total enhanced size */
  874. if (conf->user.enh_size) {
  875. if (conf->user.enh_size % mmc->hc_wp_grp_size ||
  876. conf->user.enh_start % mmc->hc_wp_grp_size) {
  877. pr_err("User data enhanced area not HC WP group "
  878. "size aligned\n");
  879. return -EINVAL;
  880. }
  881. part_attrs |= EXT_CSD_ENH_USR;
  882. enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
  883. if (mmc->high_capacity) {
  884. enh_start_addr = conf->user.enh_start;
  885. } else {
  886. enh_start_addr = (conf->user.enh_start << 9);
  887. }
  888. } else {
  889. enh_size_mult = 0;
  890. enh_start_addr = 0;
  891. }
  892. tot_enh_size_mult += enh_size_mult;
  893. for (pidx = 0; pidx < 4; pidx++) {
  894. if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
  895. pr_err("GP%i partition not HC WP group size "
  896. "aligned\n", pidx+1);
  897. return -EINVAL;
  898. }
  899. gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
  900. if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
  901. part_attrs |= EXT_CSD_ENH_GP(pidx);
  902. tot_enh_size_mult += gp_size_mult[pidx];
  903. }
  904. }
	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
  906. pr_err("Card does not support enhanced attribute\n");
  907. return -EMEDIUMTYPE;
  908. }
  909. err = mmc_send_ext_csd(mmc, ext_csd);
  910. if (err)
  911. return err;
  912. max_enh_size_mult =
  913. (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
  914. (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
  915. ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
  916. if (tot_enh_size_mult > max_enh_size_mult) {
  917. pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
  918. tot_enh_size_mult, max_enh_size_mult);
  919. return -EMEDIUMTYPE;
  920. }
	/* The default value of EXT_CSD_WR_REL_SET is device dependent;
	 * the values can only be changed if the EXT_CSD_HS_CTRL_REL bit
	 * is set, and can be changed only once, before partitioning is
	 * completed. */
  925. wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
  926. if (conf->user.wr_rel_change) {
  927. if (conf->user.wr_rel_set)
  928. wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
  929. else
  930. wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
  931. }
  932. for (pidx = 0; pidx < 4; pidx++) {
  933. if (conf->gp_part[pidx].wr_rel_change) {
  934. if (conf->gp_part[pidx].wr_rel_set)
  935. wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
  936. else
  937. wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
  938. }
  939. }
  940. if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
  941. !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
  942. puts("Card does not support host controlled partition write "
  943. "reliability settings\n");
  944. return -EMEDIUMTYPE;
  945. }
  946. if (ext_csd[EXT_CSD_PARTITION_SETTING] &
  947. EXT_CSD_PARTITION_SETTING_COMPLETED) {
  948. pr_err("Card already partitioned\n");
  949. return -EPERM;
  950. }
  951. if (mode == MMC_HWPART_CONF_CHECK)
  952. return 0;
  953. /* Partitioning requires high-capacity size definitions */
  954. if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
  955. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  956. EXT_CSD_ERASE_GROUP_DEF, 1);
  957. if (err)
  958. return err;
  959. ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
  960. #if CONFIG_IS_ENABLED(MMC_WRITE)
  961. /* update erase group size to be high-capacity */
  962. mmc->erase_grp_size =
  963. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
  964. #endif
  965. }
  966. /* all OK, write the configuration */
  967. for (i = 0; i < 4; i++) {
  968. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  969. EXT_CSD_ENH_START_ADDR+i,
  970. (enh_start_addr >> (i*8)) & 0xFF);
  971. if (err)
  972. return err;
  973. }
  974. for (i = 0; i < 3; i++) {
  975. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  976. EXT_CSD_ENH_SIZE_MULT+i,
  977. (enh_size_mult >> (i*8)) & 0xFF);
  978. if (err)
  979. return err;
  980. }
  981. for (pidx = 0; pidx < 4; pidx++) {
  982. for (i = 0; i < 3; i++) {
  983. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  984. EXT_CSD_GP_SIZE_MULT+pidx*3+i,
  985. (gp_size_mult[pidx] >> (i*8)) & 0xFF);
  986. if (err)
  987. return err;
  988. }
  989. }
  990. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  991. EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
  992. if (err)
  993. return err;
  994. if (mode == MMC_HWPART_CONF_SET)
  995. return 0;
  996. /* The WR_REL_SET is a write-once register but shall be
  997. * written before setting PART_SETTING_COMPLETED. As it is
  998. * write-once we can only write it when completing the
  999. * partitioning. */
  1000. if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
  1001. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1002. EXT_CSD_WR_REL_SET, wr_rel_set);
  1003. if (err)
  1004. return err;
  1005. }
	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration, but it only becomes effective after a power
	 * cycle, so we do not adjust the partition-related settings
	 * in the mmc struct. */
  1010. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1011. EXT_CSD_PARTITION_SETTING,
  1012. EXT_CSD_PARTITION_SETTING_COMPLETED);
  1013. if (err)
  1014. return err;
  1015. return 0;
  1016. }
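/*
 * Illustrative sketch: a caller would normally validate a layout with
 * MMC_HWPART_CONF_CHECK before committing it. Sizes are multiples of the
 * HC WP group size, as checked above; the values used here are made up.
 *
 *	struct mmc_hwpart_conf conf = { 0 };
 *
 *	conf.user.enh_start = 0;
 *	conf.user.enh_size = mmc->hc_wp_grp_size;
 *	err = mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_CHECK);
 *	if (!err)
 *		err = mmc_hwpart_config(mmc, &conf, MMC_HWPART_CONF_COMPLETE);
 */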
  1017. #endif
  1018. #if !CONFIG_IS_ENABLED(DM_MMC)
  1019. int mmc_getcd(struct mmc *mmc)
  1020. {
  1021. int cd;
  1022. cd = board_mmc_getcd(mmc);
  1023. if (cd < 0) {
  1024. if (mmc->cfg->ops->getcd)
  1025. cd = mmc->cfg->ops->getcd(mmc);
  1026. else
  1027. cd = 1;
  1028. }
  1029. return cd;
  1030. }
  1031. #endif
  1032. #if !CONFIG_IS_ENABLED(MMC_TINY)
  1033. static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
  1034. {
  1035. struct mmc_cmd cmd;
  1036. struct mmc_data data;
  1037. /* Switch the frequency */
  1038. cmd.cmdidx = SD_CMD_SWITCH_FUNC;
  1039. cmd.resp_type = MMC_RSP_R1;
  1040. cmd.cmdarg = (mode << 31) | 0xffffff;
  1041. cmd.cmdarg &= ~(0xf << (group * 4));
  1042. cmd.cmdarg |= value << (group * 4);
  1043. data.dest = (char *)resp;
  1044. data.blocksize = 64;
  1045. data.blocks = 1;
  1046. data.flags = MMC_DATA_READ;
  1047. return mmc_send_cmd(mmc, &cmd, &data);
  1048. }
  1049. static int sd_get_capabilities(struct mmc *mmc)
  1050. {
  1051. int err;
  1052. struct mmc_cmd cmd;
  1053. ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
  1054. ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
  1055. struct mmc_data data;
  1056. int timeout;
  1057. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1058. u32 sd3_bus_mode;
  1059. #endif
  1060. mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
  1061. if (mmc_host_is_spi(mmc))
  1062. return 0;
  1063. /* Read the SCR to find out if this card supports higher speeds */
  1064. cmd.cmdidx = MMC_CMD_APP_CMD;
  1065. cmd.resp_type = MMC_RSP_R1;
  1066. cmd.cmdarg = mmc->rca << 16;
  1067. err = mmc_send_cmd(mmc, &cmd, NULL);
  1068. if (err)
  1069. return err;
  1070. cmd.cmdidx = SD_CMD_APP_SEND_SCR;
  1071. cmd.resp_type = MMC_RSP_R1;
  1072. cmd.cmdarg = 0;
  1073. timeout = 3;
  1074. retry_scr:
  1075. data.dest = (char *)scr;
  1076. data.blocksize = 8;
  1077. data.blocks = 1;
  1078. data.flags = MMC_DATA_READ;
  1079. err = mmc_send_cmd(mmc, &cmd, &data);
  1080. if (err) {
  1081. if (timeout--)
  1082. goto retry_scr;
  1083. return err;
  1084. }
  1085. mmc->scr[0] = __be32_to_cpu(scr[0]);
  1086. mmc->scr[1] = __be32_to_cpu(scr[1]);
  1087. switch ((mmc->scr[0] >> 24) & 0xf) {
  1088. case 0:
  1089. mmc->version = SD_VERSION_1_0;
  1090. break;
  1091. case 1:
  1092. mmc->version = SD_VERSION_1_10;
  1093. break;
  1094. case 2:
  1095. mmc->version = SD_VERSION_2;
  1096. if ((mmc->scr[0] >> 15) & 0x1)
  1097. mmc->version = SD_VERSION_3;
  1098. break;
  1099. default:
  1100. mmc->version = SD_VERSION_1_0;
  1101. break;
  1102. }
  1103. if (mmc->scr[0] & SD_DATA_4BIT)
  1104. mmc->card_caps |= MMC_MODE_4BIT;
  1105. /* Version 1.0 doesn't support switching */
  1106. if (mmc->version == SD_VERSION_1_0)
  1107. return 0;
  1108. timeout = 4;
  1109. while (timeout--) {
  1110. err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
  1111. (u8 *)switch_status);
  1112. if (err)
  1113. return err;
  1114. /* The high-speed function is busy. Try again */
  1115. if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
  1116. break;
  1117. }
	/* Record whether the high-speed function is supported */
  1119. if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
  1120. mmc->card_caps |= MMC_CAP(SD_HS);
  1121. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
  1123. if (mmc->version < SD_VERSION_3)
  1124. return 0;
  1125. sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
  1126. if (sd3_bus_mode & SD_MODE_UHS_SDR104)
  1127. mmc->card_caps |= MMC_CAP(UHS_SDR104);
  1128. if (sd3_bus_mode & SD_MODE_UHS_SDR50)
  1129. mmc->card_caps |= MMC_CAP(UHS_SDR50);
  1130. if (sd3_bus_mode & SD_MODE_UHS_SDR25)
  1131. mmc->card_caps |= MMC_CAP(UHS_SDR25);
  1132. if (sd3_bus_mode & SD_MODE_UHS_SDR12)
  1133. mmc->card_caps |= MMC_CAP(UHS_SDR12);
  1134. if (sd3_bus_mode & SD_MODE_UHS_DDR50)
  1135. mmc->card_caps |= MMC_CAP(UHS_DDR50);
  1136. #endif
  1137. return 0;
  1138. }
  1139. static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
  1140. {
  1141. int err;
  1142. ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
  1143. int speed;
	/* SD versions 1.00 and 1.01 do not support CMD6 */
  1145. if (mmc->version == SD_VERSION_1_0)
  1146. return 0;
  1147. switch (mode) {
  1148. case MMC_LEGACY:
  1149. speed = UHS_SDR12_BUS_SPEED;
  1150. break;
  1151. case SD_HS:
  1152. speed = HIGH_SPEED_BUS_SPEED;
  1153. break;
  1154. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1155. case UHS_SDR12:
  1156. speed = UHS_SDR12_BUS_SPEED;
  1157. break;
  1158. case UHS_SDR25:
  1159. speed = UHS_SDR25_BUS_SPEED;
  1160. break;
  1161. case UHS_SDR50:
  1162. speed = UHS_SDR50_BUS_SPEED;
  1163. break;
  1164. case UHS_DDR50:
  1165. speed = UHS_DDR50_BUS_SPEED;
  1166. break;
  1167. case UHS_SDR104:
  1168. speed = UHS_SDR104_BUS_SPEED;
  1169. break;
  1170. #endif
  1171. default:
  1172. return -EINVAL;
  1173. }
  1174. err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
  1175. if (err)
  1176. return err;
  1177. if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
  1178. return -ENOTSUPP;
  1179. return 0;
  1180. }
  1181. static int sd_select_bus_width(struct mmc *mmc, int w)
  1182. {
  1183. int err;
  1184. struct mmc_cmd cmd;
  1185. if ((w != 4) && (w != 1))
  1186. return -EINVAL;
  1187. cmd.cmdidx = MMC_CMD_APP_CMD;
  1188. cmd.resp_type = MMC_RSP_R1;
  1189. cmd.cmdarg = mmc->rca << 16;
  1190. err = mmc_send_cmd(mmc, &cmd, NULL);
  1191. if (err)
  1192. return err;
  1193. cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
  1194. cmd.resp_type = MMC_RSP_R1;
  1195. if (w == 4)
  1196. cmd.cmdarg = 2;
  1197. else if (w == 1)
  1198. cmd.cmdarg = 0;
  1199. err = mmc_send_cmd(mmc, &cmd, NULL);
  1200. if (err)
  1201. return err;
  1202. return 0;
  1203. }
  1204. #endif
  1205. #if CONFIG_IS_ENABLED(MMC_WRITE)
  1206. static int sd_read_ssr(struct mmc *mmc)
  1207. {
  1208. static const unsigned int sd_au_size[] = {
  1209. 0, SZ_16K / 512, SZ_32K / 512,
  1210. SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
  1211. SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
  1212. SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
  1213. SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
  1214. SZ_64M / 512,
  1215. };
  1216. int err, i;
  1217. struct mmc_cmd cmd;
  1218. ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
  1219. struct mmc_data data;
  1220. int timeout = 3;
  1221. unsigned int au, eo, et, es;
  1222. cmd.cmdidx = MMC_CMD_APP_CMD;
  1223. cmd.resp_type = MMC_RSP_R1;
  1224. cmd.cmdarg = mmc->rca << 16;
  1225. err = mmc_send_cmd(mmc, &cmd, NULL);
  1226. #ifdef CONFIG_MMC_QUIRKS
  1227. if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
  1228. int retries = 4;
  1229. /*
  1230. * It has been seen that APP_CMD may fail on the first
  1231. * attempt, let's try a few more times
  1232. */
  1233. do {
  1234. err = mmc_send_cmd(mmc, &cmd, NULL);
  1235. if (!err)
  1236. break;
  1237. } while (retries--);
  1238. }
  1239. #endif
  1240. if (err)
  1241. return err;
  1242. cmd.cmdidx = SD_CMD_APP_SD_STATUS;
  1243. cmd.resp_type = MMC_RSP_R1;
  1244. cmd.cmdarg = 0;
  1245. retry_ssr:
  1246. data.dest = (char *)ssr;
  1247. data.blocksize = 64;
  1248. data.blocks = 1;
  1249. data.flags = MMC_DATA_READ;
  1250. err = mmc_send_cmd(mmc, &cmd, &data);
  1251. if (err) {
  1252. if (timeout--)
  1253. goto retry_ssr;
  1254. return err;
  1255. }
  1256. for (i = 0; i < 16; i++)
  1257. ssr[i] = be32_to_cpu(ssr[i]);
  1258. au = (ssr[2] >> 12) & 0xF;
  1259. if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
  1260. mmc->ssr.au = sd_au_size[au];
  1261. es = (ssr[3] >> 24) & 0xFF;
  1262. es |= (ssr[2] & 0xFF) << 8;
  1263. et = (ssr[3] >> 18) & 0x3F;
  1264. if (es && et) {
  1265. eo = (ssr[3] >> 16) & 0x3;
  1266. mmc->ssr.erase_timeout = (et * 1000) / es;
  1267. mmc->ssr.erase_offset = eo * 1000;
  1268. }
  1269. } else {
  1270. pr_debug("Invalid Allocation Unit Size.\n");
  1271. }
  1272. return 0;
  1273. }
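/*
 * Illustrative example of the SSR decode above: an AU_SIZE field of 0x9
 * selects sd_au_size[9] = SZ_4M / 512, i.e. an allocation unit of 4 MiB
 * expressed in 512-byte blocks.
 */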
  1274. #endif
  1275. /* frequency bases */
  1276. /* divided by 10 to be nice to platforms without floating point */
  1277. static const int fbase[] = {
  1278. 10000,
  1279. 100000,
  1280. 1000000,
  1281. 10000000,
  1282. };
  1283. /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
  1284. * to platforms without floating point.
  1285. */
  1286. static const u8 multipliers[] = {
  1287. 0, /* reserved */
  1288. 10,
  1289. 12,
  1290. 13,
  1291. 15,
  1292. 20,
  1293. 25,
  1294. 30,
  1295. 35,
  1296. 40,
  1297. 45,
  1298. 50,
  1299. 55,
  1300. 60,
  1301. 70,
  1302. 80,
  1303. };
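/*
 * Illustrative example: the CSD TRAN_SPEED byte encodes a frequency unit in
 * bits [2:0] and a multiplier in bits [6:3]. The scaled tables above keep
 * the product exact without floating point; for the common SD value 0x32:
 *
 *	freq = fbase[0x32 & 0x7] * multipliers[(0x32 >> 3) & 0xf];
 *	     = 1000000 * 25 = 25000000 Hz
 */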
  1304. static inline int bus_width(uint cap)
  1305. {
  1306. if (cap == MMC_MODE_8BIT)
  1307. return 8;
  1308. if (cap == MMC_MODE_4BIT)
  1309. return 4;
  1310. if (cap == MMC_MODE_1BIT)
  1311. return 1;
	pr_warn("invalid bus width capability 0x%x\n", cap);
  1313. return 0;
  1314. }
  1315. #if !CONFIG_IS_ENABLED(DM_MMC)
  1316. #ifdef MMC_SUPPORTS_TUNING
  1317. static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
  1318. {
  1319. return -ENOTSUPP;
  1320. }
  1321. #endif
  1322. static int mmc_set_ios(struct mmc *mmc)
  1323. {
  1324. int ret = 0;
  1325. if (mmc->cfg->ops->set_ios)
  1326. ret = mmc->cfg->ops->set_ios(mmc);
  1327. return ret;
  1328. }
  1329. static int mmc_host_power_cycle(struct mmc *mmc)
  1330. {
  1331. int ret = 0;
  1332. if (mmc->cfg->ops->host_power_cycle)
  1333. ret = mmc->cfg->ops->host_power_cycle(mmc);
  1334. return ret;
  1335. }
  1336. #endif
  1337. int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
  1338. {
  1339. if (!disable) {
  1340. if (clock > mmc->cfg->f_max)
  1341. clock = mmc->cfg->f_max;
  1342. if (clock < mmc->cfg->f_min)
  1343. clock = mmc->cfg->f_min;
  1344. }
  1345. mmc->clock = clock;
  1346. mmc->clk_disable = disable;
  1347. debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
  1348. return mmc_set_ios(mmc);
  1349. }
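/*
 * Illustrative sketch: requesting a bus clock; the value is clamped to the
 * host's [f_min, f_max] range before being applied:
 *
 *	mmc_set_clock(mmc, 52000000, MMC_CLK_ENABLE);
 */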
  1350. static int mmc_set_bus_width(struct mmc *mmc, uint width)
  1351. {
  1352. mmc->bus_width = width;
  1353. return mmc_set_ios(mmc);
  1354. }
  1355. #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Helper function to display the capabilities in a human-friendly
 * manner. The capabilities include bus width and supported modes.
 */
  1361. void mmc_dump_capabilities(const char *text, uint caps)
  1362. {
  1363. enum bus_mode mode;
  1364. pr_debug("%s: widths [", text);
  1365. if (caps & MMC_MODE_8BIT)
  1366. pr_debug("8, ");
  1367. if (caps & MMC_MODE_4BIT)
  1368. pr_debug("4, ");
  1369. if (caps & MMC_MODE_1BIT)
  1370. pr_debug("1, ");
  1371. pr_debug("\b\b] modes [");
  1372. for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
  1373. if (MMC_CAP(mode) & caps)
  1374. pr_debug("%s, ", mmc_mode_name(mode));
  1375. pr_debug("\b\b]\n");
  1376. }
  1377. #endif
  1378. struct mode_width_tuning {
  1379. enum bus_mode mode;
  1380. uint widths;
  1381. #ifdef MMC_SUPPORTS_TUNING
  1382. uint tuning;
  1383. #endif
  1384. };
  1385. #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
  1386. int mmc_voltage_to_mv(enum mmc_voltage voltage)
  1387. {
  1388. switch (voltage) {
  1389. case MMC_SIGNAL_VOLTAGE_000: return 0;
  1390. case MMC_SIGNAL_VOLTAGE_330: return 3300;
  1391. case MMC_SIGNAL_VOLTAGE_180: return 1800;
  1392. case MMC_SIGNAL_VOLTAGE_120: return 1200;
  1393. }
  1394. return -EINVAL;
  1395. }
  1396. static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
  1397. {
  1398. int err;
  1399. if (mmc->signal_voltage == signal_voltage)
  1400. return 0;
  1401. mmc->signal_voltage = signal_voltage;
  1402. err = mmc_set_ios(mmc);
  1403. if (err)
  1404. pr_debug("unable to set voltage (err %d)\n", err);
  1405. return err;
  1406. }
  1407. #else
  1408. static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
  1409. {
  1410. return 0;
  1411. }
  1412. #endif
  1413. #if !CONFIG_IS_ENABLED(MMC_TINY)
  1414. static const struct mode_width_tuning sd_modes_by_pref[] = {
  1415. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1416. #ifdef MMC_SUPPORTS_TUNING
  1417. {
  1418. .mode = UHS_SDR104,
  1419. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1420. .tuning = MMC_CMD_SEND_TUNING_BLOCK
  1421. },
  1422. #endif
  1423. {
  1424. .mode = UHS_SDR50,
  1425. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1426. },
  1427. {
  1428. .mode = UHS_DDR50,
  1429. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1430. },
  1431. {
  1432. .mode = UHS_SDR25,
  1433. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1434. },
  1435. #endif
  1436. {
  1437. .mode = SD_HS,
  1438. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1439. },
  1440. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1441. {
  1442. .mode = UHS_SDR12,
  1443. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1444. },
  1445. #endif
  1446. {
  1447. .mode = MMC_LEGACY,
  1448. .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
  1449. }
  1450. };
  1451. #define for_each_sd_mode_by_pref(caps, mwt) \
  1452. for (mwt = sd_modes_by_pref;\
  1453. mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
  1454. mwt++) \
  1455. if (caps & MMC_CAP(mwt->mode))
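/*
 * Walk sd_modes_by_pref from fastest to slowest and, for every mode the
 * card and host both support, try the widest bus first. On any failure
 * the card is dropped back to MMC_LEGACY before the next combination is
 * tried, so the bus is always left in a usable state.
 */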
  1456. static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
  1457. {
  1458. int err;
  1459. uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
  1460. const struct mode_width_tuning *mwt;
  1461. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1462. bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
  1463. #else
  1464. bool uhs_en = false;
  1465. #endif
  1466. uint caps;
  1467. #ifdef DEBUG
  1468. mmc_dump_capabilities("sd card", card_caps);
  1469. mmc_dump_capabilities("host", mmc->host_caps);
  1470. #endif
  1471. if (mmc_host_is_spi(mmc)) {
  1472. mmc_set_bus_width(mmc, 1);
  1473. mmc_select_mode(mmc, MMC_LEGACY);
  1474. mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
  1475. return 0;
  1476. }
  1477. /* Restrict card's capabilities by what the host can do */
  1478. caps = card_caps & mmc->host_caps;
  1479. if (!uhs_en)
  1480. caps &= ~UHS_CAPS;
  1481. for_each_sd_mode_by_pref(caps, mwt) {
  1482. uint *w;
  1483. for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
  1484. if (*w & caps & mwt->widths) {
  1485. pr_debug("trying mode %s width %d (at %d MHz)\n",
  1486. mmc_mode_name(mwt->mode),
  1487. bus_width(*w),
  1488. mmc_mode2freq(mmc, mwt->mode) / 1000000);
  1489. /* configure the bus width (card + host) */
  1490. err = sd_select_bus_width(mmc, bus_width(*w));
  1491. if (err)
  1492. goto error;
  1493. mmc_set_bus_width(mmc, bus_width(*w));
  1494. /* configure the bus mode (card) */
  1495. err = sd_set_card_speed(mmc, mwt->mode);
  1496. if (err)
  1497. goto error;
  1498. /* configure the bus mode (host) */
  1499. mmc_select_mode(mmc, mwt->mode);
  1500. mmc_set_clock(mmc, mmc->tran_speed,
  1501. MMC_CLK_ENABLE);
  1502. #ifdef MMC_SUPPORTS_TUNING
  1503. /* execute tuning if needed */
  1504. if (mwt->tuning && !mmc_host_is_spi(mmc)) {
  1505. err = mmc_execute_tuning(mmc,
  1506. mwt->tuning);
  1507. if (err) {
  1508. pr_debug("tuning failed\n");
  1509. goto error;
  1510. }
  1511. }
  1512. #endif
  1513. #if CONFIG_IS_ENABLED(MMC_WRITE)
  1514. err = sd_read_ssr(mmc);
  1515. if (err)
  1516. pr_warn("unable to read ssr\n");
  1517. #endif
  1518. if (!err)
  1519. return 0;
  1520. error:
  1521. /* revert to a safer bus speed */
  1522. mmc_select_mode(mmc, MMC_LEGACY);
  1523. mmc_set_clock(mmc, mmc->tran_speed,
  1524. MMC_CLK_ENABLE);
  1525. }
  1526. }
  1527. }
  1528. pr_err("unable to select a mode\n");
  1529. return -ENOTSUPP;
  1530. }
  1531. /*
1532. * read and compare the part of ext csd that is constant.
  1533. * This can be used to check that the transfer is working
  1534. * as expected.
  1535. */
  1536. static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
  1537. {
  1538. int err;
  1539. const u8 *ext_csd = mmc->ext_csd;
  1540. ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
  1541. if (mmc->version < MMC_VERSION_4)
  1542. return 0;
  1543. err = mmc_send_ext_csd(mmc, test_csd);
  1544. if (err)
  1545. return err;
  1546. /* Only compare read only fields */
  1547. if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
  1548. == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
  1549. ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
  1550. == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
  1551. ext_csd[EXT_CSD_REV]
  1552. == test_csd[EXT_CSD_REV] &&
  1553. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
  1554. == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
  1555. memcmp(&ext_csd[EXT_CSD_SEC_CNT],
  1556. &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
  1557. return 0;
  1558. return -EBADMSG;
  1559. }
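/*
 * Pick the lowest signal voltage that both the card (per the EXT_CSD
 * card type bits for the requested mode) and the caller's allowed_mask
 * permit, trying each candidate until mmc_set_signal_voltage() succeeds.
 */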
  1560. #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
  1561. static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
  1562. uint32_t allowed_mask)
  1563. {
  1564. u32 card_mask = 0;
  1565. switch (mode) {
  1566. case MMC_HS_400_ES:
  1567. case MMC_HS_400:
  1568. case MMC_HS_200:
  1569. if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
  1570. EXT_CSD_CARD_TYPE_HS400_1_8V))
  1571. card_mask |= MMC_SIGNAL_VOLTAGE_180;
  1572. if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
  1573. EXT_CSD_CARD_TYPE_HS400_1_2V))
  1574. card_mask |= MMC_SIGNAL_VOLTAGE_120;
  1575. break;
  1576. case MMC_DDR_52:
  1577. if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
  1578. card_mask |= MMC_SIGNAL_VOLTAGE_330 |
  1579. MMC_SIGNAL_VOLTAGE_180;
  1580. if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
  1581. card_mask |= MMC_SIGNAL_VOLTAGE_120;
  1582. break;
  1583. default:
  1584. card_mask |= MMC_SIGNAL_VOLTAGE_330;
  1585. break;
  1586. }
  1587. while (card_mask & allowed_mask) {
  1588. enum mmc_voltage best_match;
  1589. best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
  1590. if (!mmc_set_signal_voltage(mmc, best_match))
  1591. return 0;
  1592. allowed_mask &= ~best_match;
  1593. }
  1594. return -ENOTSUPP;
  1595. }
  1596. #else
  1597. static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
  1598. uint32_t allowed_mask)
  1599. {
  1600. return 0;
  1601. }
  1602. #endif
  1603. static const struct mode_width_tuning mmc_modes_by_pref[] = {
  1604. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  1605. {
  1606. .mode = MMC_HS_400_ES,
  1607. .widths = MMC_MODE_8BIT,
  1608. },
  1609. #endif
  1610. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
  1611. {
  1612. .mode = MMC_HS_400,
  1613. .widths = MMC_MODE_8BIT,
  1614. .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
  1615. },
  1616. #endif
  1617. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
  1618. {
  1619. .mode = MMC_HS_200,
  1620. .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
  1621. .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
  1622. },
  1623. #endif
  1624. {
  1625. .mode = MMC_DDR_52,
  1626. .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
  1627. },
  1628. {
  1629. .mode = MMC_HS_52,
  1630. .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
  1631. },
  1632. {
  1633. .mode = MMC_HS,
  1634. .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
  1635. },
  1636. {
  1637. .mode = MMC_LEGACY,
  1638. .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
  1639. }
  1640. };
  1641. #define for_each_mmc_mode_by_pref(caps, mwt) \
  1642. for (mwt = mmc_modes_by_pref;\
  1643. mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
  1644. mwt++) \
  1645. if (caps & MMC_CAP(mwt->mode))
  1646. static const struct ext_csd_bus_width {
  1647. uint cap;
  1648. bool is_ddr;
  1649. uint ext_csd_bits;
  1650. } ext_csd_bus_width[] = {
  1651. {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
  1652. {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
  1653. {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
  1654. {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
  1655. {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
  1656. };
  1657. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
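/*
 * HS400 selection sequence: tune the sampling point while in HS200,
 * drop back to HS timing, switch the card to the 8-bit DDR bus width,
 * and only then program the HS400 timing and the final clock.
 */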
  1658. static int mmc_select_hs400(struct mmc *mmc)
  1659. {
  1660. int err;
  1661. /* Set timing to HS200 for tuning */
  1662. err = mmc_set_card_speed(mmc, MMC_HS_200, false);
  1663. if (err)
  1664. return err;
  1665. /* configure the bus mode (host) */
  1666. mmc_select_mode(mmc, MMC_HS_200);
  1667. mmc_set_clock(mmc, mmc->tran_speed, false);
  1668. /* execute tuning if needed */
  1669. err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
  1670. if (err) {
  1671. debug("tuning failed\n");
  1672. return err;
  1673. }
  1674. /* Set back to HS */
  1675. mmc_set_card_speed(mmc, MMC_HS, true);
  1676. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
  1677. EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
  1678. if (err)
  1679. return err;
  1680. err = mmc_set_card_speed(mmc, MMC_HS_400, false);
  1681. if (err)
  1682. return err;
  1683. mmc_select_mode(mmc, MMC_HS_400);
  1684. err = mmc_set_clock(mmc, mmc->tran_speed, false);
  1685. if (err)
  1686. return err;
  1687. return 0;
  1688. }
  1689. #else
  1690. static int mmc_select_hs400(struct mmc *mmc)
  1691. {
  1692. return -ENOTSUPP;
  1693. }
  1694. #endif
  1695. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  1696. #if !CONFIG_IS_ENABLED(DM_MMC)
  1697. static int mmc_set_enhanced_strobe(struct mmc *mmc)
  1698. {
  1699. return -ENOTSUPP;
  1700. }
  1701. #endif
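/*
 * HS400 Enhanced Strobe needs no tuning step: switch the card to HS,
 * enable the 8-bit DDR bus width with the strobe bit set, program the
 * HS400ES timing, and finally ask the host to enable enhanced strobe
 * sampling.
 */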
  1702. static int mmc_select_hs400es(struct mmc *mmc)
  1703. {
  1704. int err;
  1705. err = mmc_set_card_speed(mmc, MMC_HS, true);
  1706. if (err)
  1707. return err;
  1708. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
  1709. EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
  1710. EXT_CSD_BUS_WIDTH_STROBE);
  1711. if (err) {
  1712. printf("switch to bus width for hs400 failed\n");
  1713. return err;
  1714. }
  1715. /* TODO: driver strength */
  1716. err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
  1717. if (err)
  1718. return err;
  1719. mmc_select_mode(mmc, MMC_HS_400_ES);
  1720. err = mmc_set_clock(mmc, mmc->tran_speed, false);
  1721. if (err)
  1722. return err;
  1723. return mmc_set_enhanced_strobe(mmc);
  1724. }
  1725. #else
  1726. static int mmc_select_hs400es(struct mmc *mmc)
  1727. {
  1728. return -ENOTSUPP;
  1729. }
  1730. #endif
  1731. #define for_each_supported_width(caps, ddr, ecbv) \
  1732. for (ecbv = ext_csd_bus_width;\
  1733. ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
  1734. ecbv++) \
  1735. if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
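/*
 * eMMC counterpart of sd_select_mode_and_width(): iterate over
 * mmc_modes_by_pref and the matching EXT_CSD bus-width encodings,
 * lower the signal voltage where the mode allows it, switch the card,
 * and validate the result by re-reading the read-only EXT_CSD fields.
 * Any failure reverts to 1-bit MMC_LEGACY before the next combination
 * is tried.
 */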
  1736. static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
  1737. {
  1738. int err;
  1739. const struct mode_width_tuning *mwt;
  1740. const struct ext_csd_bus_width *ecbw;
  1741. #ifdef DEBUG
  1742. mmc_dump_capabilities("mmc", card_caps);
  1743. mmc_dump_capabilities("host", mmc->host_caps);
  1744. #endif
  1745. if (mmc_host_is_spi(mmc)) {
  1746. mmc_set_bus_width(mmc, 1);
  1747. mmc_select_mode(mmc, MMC_LEGACY);
  1748. mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
  1749. return 0;
  1750. }
  1751. /* Restrict card's capabilities by what the host can do */
  1752. card_caps &= mmc->host_caps;
  1753. /* Only version 4 of MMC supports wider bus widths */
  1754. if (mmc->version < MMC_VERSION_4)
  1755. return 0;
  1756. if (!mmc->ext_csd) {
  1757. pr_debug("No ext_csd found!\n"); /* this should enver happen */
  1758. return -ENOTSUPP;
  1759. }
  1760. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
  1761. CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
  1762. /*
  1763. * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
  1764. * before doing anything else, since a transition from either of
  1765. * the HS200/HS400 mode directly to legacy mode is not supported.
  1766. */
  1767. if (mmc->selected_mode == MMC_HS_200 ||
  1768. mmc->selected_mode == MMC_HS_400)
  1769. mmc_set_card_speed(mmc, MMC_HS, true);
  1770. else
  1771. #endif
  1772. mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
  1773. for_each_mmc_mode_by_pref(card_caps, mwt) {
  1774. for_each_supported_width(card_caps & mwt->widths,
  1775. mmc_is_mode_ddr(mwt->mode), ecbw) {
  1776. enum mmc_voltage old_voltage;
  1777. pr_debug("trying mode %s width %d (at %d MHz)\n",
  1778. mmc_mode_name(mwt->mode),
  1779. bus_width(ecbw->cap),
  1780. mmc_mode2freq(mmc, mwt->mode) / 1000000);
  1781. old_voltage = mmc->signal_voltage;
  1782. err = mmc_set_lowest_voltage(mmc, mwt->mode,
  1783. MMC_ALL_SIGNAL_VOLTAGE);
  1784. if (err)
  1785. continue;
  1786. /* configure the bus width (card + host) */
  1787. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1788. EXT_CSD_BUS_WIDTH,
  1789. ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
  1790. if (err)
  1791. goto error;
  1792. mmc_set_bus_width(mmc, bus_width(ecbw->cap));
  1793. if (mwt->mode == MMC_HS_400) {
  1794. err = mmc_select_hs400(mmc);
  1795. if (err) {
  1796. printf("Select HS400 failed %d\n", err);
  1797. goto error;
  1798. }
  1799. } else if (mwt->mode == MMC_HS_400_ES) {
  1800. err = mmc_select_hs400es(mmc);
  1801. if (err) {
  1802. printf("Select HS400ES failed %d\n",
  1803. err);
  1804. goto error;
  1805. }
  1806. } else {
  1807. /* configure the bus speed (card) */
  1808. err = mmc_set_card_speed(mmc, mwt->mode, false);
  1809. if (err)
  1810. goto error;
  1811. /*
  1812. * configure the bus width AND the ddr mode
  1813. * (card). The host side will be taken care
  1814. * of in the next step
  1815. */
  1816. if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
  1817. err = mmc_switch(mmc,
  1818. EXT_CSD_CMD_SET_NORMAL,
  1819. EXT_CSD_BUS_WIDTH,
  1820. ecbw->ext_csd_bits);
  1821. if (err)
  1822. goto error;
  1823. }
  1824. /* configure the bus mode (host) */
  1825. mmc_select_mode(mmc, mwt->mode);
  1826. mmc_set_clock(mmc, mmc->tran_speed,
  1827. MMC_CLK_ENABLE);
  1828. #ifdef MMC_SUPPORTS_TUNING
  1829. /* execute tuning if needed */
  1830. if (mwt->tuning) {
  1831. err = mmc_execute_tuning(mmc,
  1832. mwt->tuning);
  1833. if (err) {
  1834. pr_debug("tuning failed\n");
  1835. goto error;
  1836. }
  1837. }
  1838. #endif
  1839. }
  1840. /* do a transfer to check the configuration */
  1841. err = mmc_read_and_compare_ext_csd(mmc);
  1842. if (!err)
  1843. return 0;
  1844. error:
  1845. mmc_set_signal_voltage(mmc, old_voltage);
1846. /* if an error occurred, revert to a safer bus mode */
  1847. mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1848. EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
  1849. mmc_select_mode(mmc, MMC_LEGACY);
  1850. mmc_set_bus_width(mmc, 1);
  1851. }
  1852. }
  1853. pr_err("unable to select a mode\n");
  1854. return -ENOTSUPP;
  1855. }
  1856. #endif
  1857. #if CONFIG_IS_ENABLED(MMC_TINY)
  1858. DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
  1859. #endif
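/*
 * Read the EXT_CSD of a v4+ eMMC device and cache the fields needed
 * later: exact device version, user/boot/RPMB/GP partition sizes,
 * partition switch timing, erase group size and write reliability
 * settings. SD cards and pre-v4 devices return early.
 */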
  1860. static int mmc_startup_v4(struct mmc *mmc)
  1861. {
  1862. int err, i;
  1863. u64 capacity;
  1864. bool has_parts = false;
  1865. bool part_completed;
  1866. static const u32 mmc_versions[] = {
  1867. MMC_VERSION_4,
  1868. MMC_VERSION_4_1,
  1869. MMC_VERSION_4_2,
  1870. MMC_VERSION_4_3,
  1871. MMC_VERSION_4_4,
  1872. MMC_VERSION_4_41,
  1873. MMC_VERSION_4_5,
  1874. MMC_VERSION_5_0,
  1875. MMC_VERSION_5_1
  1876. };
  1877. #if CONFIG_IS_ENABLED(MMC_TINY)
  1878. u8 *ext_csd = ext_csd_bkup;
  1879. if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
  1880. return 0;
  1881. if (!mmc->ext_csd)
1882. memset(ext_csd_bkup, 0, MMC_MAX_BLOCK_LEN); /* ext_csd_bkup is a pointer, so sizeof() would only clear a few bytes */
  1883. err = mmc_send_ext_csd(mmc, ext_csd);
  1884. if (err)
  1885. goto error;
  1886. /* store the ext csd for future reference */
  1887. if (!mmc->ext_csd)
  1888. mmc->ext_csd = ext_csd;
  1889. #else
  1890. ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
  1891. if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
  1892. return 0;
  1893. /* check ext_csd version and capacity */
  1894. err = mmc_send_ext_csd(mmc, ext_csd);
  1895. if (err)
  1896. goto error;
  1897. /* store the ext csd for future reference */
  1898. if (!mmc->ext_csd)
  1899. mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
  1900. if (!mmc->ext_csd)
  1901. return -ENOMEM;
  1902. memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
  1903. #endif
  1904. if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
  1905. return -EINVAL;
  1906. mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
  1907. if (mmc->version >= MMC_VERSION_4_2) {
  1908. /*
  1909. * According to the JEDEC Standard, the value of
  1910. * ext_csd's capacity is valid if the value is more
  1911. * than 2GB
  1912. */
  1913. capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
  1914. | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
  1915. | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
  1916. | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
  1917. capacity *= MMC_MAX_BLOCK_LEN;
  1918. if ((capacity >> 20) > 2 * 1024)
  1919. mmc->capacity_user = capacity;
  1920. }
  1921. if (mmc->version >= MMC_VERSION_4_5)
  1922. mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
  1923. /* The partition data may be non-zero but it is only
  1924. * effective if PARTITION_SETTING_COMPLETED is set in
  1925. * EXT_CSD, so ignore any data if this bit is not set,
  1926. * except for enabling the high-capacity group size
  1927. * definition (see below).
  1928. */
  1929. part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
  1930. EXT_CSD_PARTITION_SETTING_COMPLETED);
  1931. mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
  1932. /* Some eMMC set the value too low so set a minimum */
  1933. if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
  1934. mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
  1935. /* store the partition info of emmc */
  1936. mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
  1937. if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
  1938. ext_csd[EXT_CSD_BOOT_MULT])
  1939. mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
  1940. if (part_completed &&
  1941. (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
  1942. mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
  1943. mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
  1944. mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
  1945. for (i = 0; i < 4; i++) {
  1946. int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
  1947. uint mult = (ext_csd[idx + 2] << 16) +
  1948. (ext_csd[idx + 1] << 8) + ext_csd[idx];
  1949. if (mult)
  1950. has_parts = true;
  1951. if (!part_completed)
  1952. continue;
  1953. mmc->capacity_gp[i] = mult;
  1954. mmc->capacity_gp[i] *=
  1955. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
  1956. mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  1957. mmc->capacity_gp[i] <<= 19;
  1958. }
  1959. #ifndef CONFIG_SPL_BUILD
  1960. if (part_completed) {
  1961. mmc->enh_user_size =
  1962. (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
  1963. (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
  1964. ext_csd[EXT_CSD_ENH_SIZE_MULT];
  1965. mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
  1966. mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  1967. mmc->enh_user_size <<= 19;
  1968. mmc->enh_user_start =
  1969. (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
  1970. (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
  1971. (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
  1972. ext_csd[EXT_CSD_ENH_START_ADDR];
  1973. if (mmc->high_capacity)
  1974. mmc->enh_user_start <<= 9;
  1975. }
  1976. #endif
  1977. /*
  1978. * Host needs to enable ERASE_GRP_DEF bit if device is
  1979. * partitioned. This bit will be lost every time after a reset
  1980. * or power off. This will affect erase size.
  1981. */
  1982. if (part_completed)
  1983. has_parts = true;
  1984. if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
  1985. (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
  1986. has_parts = true;
  1987. if (has_parts) {
  1988. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1989. EXT_CSD_ERASE_GROUP_DEF, 1);
  1990. if (err)
  1991. goto error;
  1992. ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
  1993. }
  1994. if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
  1995. #if CONFIG_IS_ENABLED(MMC_WRITE)
  1996. /* Read out group size from ext_csd */
  1997. mmc->erase_grp_size =
  1998. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
  1999. #endif
  2000. /*
2001. * If high capacity and partition setting is completed,
2002. * SEC_COUNT is valid even if it is smaller than 2 GiB
2003. * (JEDEC Standard JESD84-B45, section 6.2.4).
  2004. */
  2005. if (mmc->high_capacity && part_completed) {
  2006. capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
  2007. (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
  2008. (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
  2009. (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
  2010. capacity *= MMC_MAX_BLOCK_LEN;
  2011. mmc->capacity_user = capacity;
  2012. }
  2013. }
  2014. #if CONFIG_IS_ENABLED(MMC_WRITE)
  2015. else {
  2016. /* Calculate the group size from the csd value. */
  2017. int erase_gsz, erase_gmul;
  2018. erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
  2019. erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
  2020. mmc->erase_grp_size = (erase_gsz + 1)
  2021. * (erase_gmul + 1);
  2022. }
  2023. #endif
  2024. #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
  2025. mmc->hc_wp_grp_size = 1024
  2026. * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
  2027. * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
  2028. #endif
  2029. mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
  2030. return 0;
  2031. error:
  2032. if (mmc->ext_csd) {
  2033. #if !CONFIG_IS_ENABLED(MMC_TINY)
  2034. free(mmc->ext_csd);
  2035. #endif
  2036. mmc->ext_csd = NULL;
  2037. }
  2038. return err;
  2039. }
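/*
 * Bring an already powered-up card from identification to transfer
 * state: fetch the CID, assign/read the RCA, parse the CSD for version,
 * legacy speed and capacity, select the card, then negotiate the final
 * bus mode and width and fill in the block device descriptor.
 */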
  2040. static int mmc_startup(struct mmc *mmc)
  2041. {
  2042. int err, i;
  2043. uint mult, freq;
  2044. u64 cmult, csize;
  2045. struct mmc_cmd cmd;
  2046. struct blk_desc *bdesc;
  2047. #ifdef CONFIG_MMC_SPI_CRC_ON
  2048. if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
  2049. cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
  2050. cmd.resp_type = MMC_RSP_R1;
  2051. cmd.cmdarg = 1;
  2052. err = mmc_send_cmd(mmc, &cmd, NULL);
  2053. if (err)
  2054. return err;
  2055. }
  2056. #endif
  2057. /* Put the Card in Identify Mode */
  2058. cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
  2059. MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
  2060. cmd.resp_type = MMC_RSP_R2;
  2061. cmd.cmdarg = 0;
  2062. err = mmc_send_cmd(mmc, &cmd, NULL);
  2063. #ifdef CONFIG_MMC_QUIRKS
  2064. if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
  2065. int retries = 4;
  2066. /*
  2067. * It has been seen that SEND_CID may fail on the first
2068. * attempt, let's try a few more times
  2069. */
  2070. do {
  2071. err = mmc_send_cmd(mmc, &cmd, NULL);
  2072. if (!err)
  2073. break;
  2074. } while (retries--);
  2075. }
  2076. #endif
  2077. if (err)
  2078. return err;
  2079. memcpy(mmc->cid, cmd.response, 16);
  2080. /*
  2081. * For MMC cards, set the Relative Address.
2082. * For SD cards, get the Relative Address.
  2083. * This also puts the cards into Standby State
  2084. */
  2085. if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
  2086. cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
  2087. cmd.cmdarg = mmc->rca << 16;
  2088. cmd.resp_type = MMC_RSP_R6;
  2089. err = mmc_send_cmd(mmc, &cmd, NULL);
  2090. if (err)
  2091. return err;
  2092. if (IS_SD(mmc))
  2093. mmc->rca = (cmd.response[0] >> 16) & 0xffff;
  2094. }
  2095. /* Get the Card-Specific Data */
  2096. cmd.cmdidx = MMC_CMD_SEND_CSD;
  2097. cmd.resp_type = MMC_RSP_R2;
  2098. cmd.cmdarg = mmc->rca << 16;
  2099. err = mmc_send_cmd(mmc, &cmd, NULL);
  2100. if (err)
  2101. return err;
  2102. mmc->csd[0] = cmd.response[0];
  2103. mmc->csd[1] = cmd.response[1];
  2104. mmc->csd[2] = cmd.response[2];
  2105. mmc->csd[3] = cmd.response[3];
  2106. if (mmc->version == MMC_VERSION_UNKNOWN) {
  2107. int version = (cmd.response[0] >> 26) & 0xf;
  2108. switch (version) {
  2109. case 0:
  2110. mmc->version = MMC_VERSION_1_2;
  2111. break;
  2112. case 1:
  2113. mmc->version = MMC_VERSION_1_4;
  2114. break;
  2115. case 2:
  2116. mmc->version = MMC_VERSION_2_2;
  2117. break;
  2118. case 3:
  2119. mmc->version = MMC_VERSION_3;
  2120. break;
  2121. case 4:
  2122. mmc->version = MMC_VERSION_4;
  2123. break;
  2124. default:
  2125. mmc->version = MMC_VERSION_1_2;
  2126. break;
  2127. }
  2128. }
  2129. /* divide frequency by 10, since the mults are 10x bigger */
  2130. freq = fbase[(cmd.response[0] & 0x7)];
  2131. mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
  2132. mmc->legacy_speed = freq * mult;
  2133. mmc_select_mode(mmc, MMC_LEGACY);
  2134. mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
  2135. mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
  2136. #if CONFIG_IS_ENABLED(MMC_WRITE)
  2137. if (IS_SD(mmc))
  2138. mmc->write_bl_len = mmc->read_bl_len;
  2139. else
  2140. mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
  2141. #endif
  2142. if (mmc->high_capacity) {
  2143. csize = (mmc->csd[1] & 0x3f) << 16
  2144. | (mmc->csd[2] & 0xffff0000) >> 16;
  2145. cmult = 8;
  2146. } else {
  2147. csize = (mmc->csd[1] & 0x3ff) << 2
  2148. | (mmc->csd[2] & 0xc0000000) >> 30;
  2149. cmult = (mmc->csd[2] & 0x00038000) >> 15;
  2150. }
  2151. mmc->capacity_user = (csize + 1) << (cmult + 2);
  2152. mmc->capacity_user *= mmc->read_bl_len;
  2153. mmc->capacity_boot = 0;
  2154. mmc->capacity_rpmb = 0;
  2155. for (i = 0; i < 4; i++)
  2156. mmc->capacity_gp[i] = 0;
  2157. if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
  2158. mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
  2159. #if CONFIG_IS_ENABLED(MMC_WRITE)
  2160. if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
  2161. mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
  2162. #endif
  2163. if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
  2164. cmd.cmdidx = MMC_CMD_SET_DSR;
  2165. cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
  2166. cmd.resp_type = MMC_RSP_NONE;
  2167. if (mmc_send_cmd(mmc, &cmd, NULL))
  2168. pr_warn("MMC: SET_DSR failed\n");
  2169. }
  2170. /* Select the card, and put it into Transfer Mode */
  2171. if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
  2172. cmd.cmdidx = MMC_CMD_SELECT_CARD;
  2173. cmd.resp_type = MMC_RSP_R1;
  2174. cmd.cmdarg = mmc->rca << 16;
  2175. err = mmc_send_cmd(mmc, &cmd, NULL);
  2176. if (err)
  2177. return err;
  2178. }
  2179. /*
  2180. * For SD, its erase group is always one sector
  2181. */
  2182. #if CONFIG_IS_ENABLED(MMC_WRITE)
  2183. mmc->erase_grp_size = 1;
  2184. #endif
  2185. mmc->part_config = MMCPART_NOAVAILABLE;
  2186. err = mmc_startup_v4(mmc);
  2187. if (err)
  2188. return err;
  2189. err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
  2190. if (err)
  2191. return err;
  2192. #if CONFIG_IS_ENABLED(MMC_TINY)
  2193. mmc_set_clock(mmc, mmc->legacy_speed, false);
  2194. mmc_select_mode(mmc, MMC_LEGACY);
  2195. mmc_set_bus_width(mmc, 1);
  2196. #else
  2197. if (IS_SD(mmc)) {
  2198. err = sd_get_capabilities(mmc);
  2199. if (err)
  2200. return err;
  2201. err = sd_select_mode_and_width(mmc, mmc->card_caps);
  2202. } else {
  2203. err = mmc_get_capabilities(mmc);
  2204. if (err)
  2205. return err;
  2206. err = mmc_select_mode_and_width(mmc, mmc->card_caps);
  2207. }
  2208. #endif
  2209. if (err)
  2210. return err;
  2211. mmc->best_mode = mmc->selected_mode;
  2212. /* Fix the block length for DDR mode */
  2213. if (mmc->ddr_mode) {
  2214. mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
  2215. #if CONFIG_IS_ENABLED(MMC_WRITE)
  2216. mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
  2217. #endif
  2218. }
  2219. /* fill in device description */
  2220. bdesc = mmc_get_blk_desc(mmc);
  2221. bdesc->lun = 0;
  2222. bdesc->hwpart = 0;
  2223. bdesc->type = 0;
  2224. bdesc->blksz = mmc->read_bl_len;
  2225. bdesc->log2blksz = LOG2(bdesc->blksz);
  2226. bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
  2227. #if !defined(CONFIG_SPL_BUILD) || \
  2228. (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
  2229. !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
  2230. sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
  2231. mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
  2232. (mmc->cid[3] >> 16) & 0xffff);
  2233. sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
  2234. (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
  2235. (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
  2236. (mmc->cid[2] >> 24) & 0xff);
  2237. sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
  2238. (mmc->cid[2] >> 16) & 0xf);
  2239. #else
  2240. bdesc->vendor[0] = 0;
  2241. bdesc->product[0] = 0;
  2242. bdesc->revision[0] = 0;
  2243. #endif
  2244. #if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
  2245. part_init(bdesc);
  2246. #endif
  2247. return 0;
  2248. }
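/*
 * CMD8 (SEND_IF_COND): advertise the supported voltage range and a 0xaa
 * check pattern. SD version 2.00+ cards echo the pattern back; a timeout
 * or a wrong echo means the card is either pre-2.0 SD or not SD at all.
 */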
  2249. static int mmc_send_if_cond(struct mmc *mmc)
  2250. {
  2251. struct mmc_cmd cmd;
  2252. int err;
  2253. cmd.cmdidx = SD_CMD_SEND_IF_COND;
  2254. /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
  2255. cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
  2256. cmd.resp_type = MMC_RSP_R7;
  2257. err = mmc_send_cmd(mmc, &cmd, NULL);
  2258. if (err)
  2259. return err;
  2260. if ((cmd.response[0] & 0xff) != 0xaa)
  2261. return -EOPNOTSUPP;
  2262. else
  2263. mmc->version = SD_VERSION_2;
  2264. return 0;
  2265. }
  2266. #if !CONFIG_IS_ENABLED(DM_MMC)
  2267. /* board-specific MMC power initializations. */
  2268. __weak void board_mmc_power_init(void)
  2269. {
  2270. }
  2271. #endif
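/*
 * Resolve the card's power supplies: with driver model and regulators
 * enabled, look up the vmmc/vqmmc supplies from the device tree;
 * otherwise fall back to the legacy board_mmc_power_init() hook.
 */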
  2272. static int mmc_power_init(struct mmc *mmc)
  2273. {
  2274. #if CONFIG_IS_ENABLED(DM_MMC)
  2275. #if CONFIG_IS_ENABLED(DM_REGULATOR)
  2276. int ret;
  2277. ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
  2278. &mmc->vmmc_supply);
  2279. if (ret)
  2280. pr_debug("%s: No vmmc supply\n", mmc->dev->name);
  2281. ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
  2282. &mmc->vqmmc_supply);
  2283. if (ret)
  2284. pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
  2285. #endif
  2286. #else /* !CONFIG_DM_MMC */
  2287. /*
  2288. * Driver model should use a regulator, as above, rather than calling
  2289. * out to board code.
  2290. */
  2291. board_mmc_power_init();
  2292. #endif
  2293. return 0;
  2294. }
  2295. /*
  2296. * put the host in the initial state:
  2297. * - turn on Vdd (card power supply)
  2298. * - configure the bus width and clock to minimal values
  2299. */
  2300. static void mmc_set_initial_state(struct mmc *mmc)
  2301. {
  2302. int err;
  2303. /* First try to set 3.3V. If it fails set to 1.8V */
  2304. err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
  2305. if (err != 0)
  2306. err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
  2307. if (err != 0)
  2308. pr_warn("mmc: failed to set signal voltage\n");
  2309. mmc_select_mode(mmc, MMC_LEGACY);
  2310. mmc_set_bus_width(mmc, 1);
  2311. mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
  2312. }
  2313. static int mmc_power_on(struct mmc *mmc)
  2314. {
  2315. #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
  2316. if (mmc->vmmc_supply) {
  2317. int ret = regulator_set_enable(mmc->vmmc_supply, true);
  2318. if (ret) {
  2319. puts("Error enabling VMMC supply\n");
  2320. return ret;
  2321. }
  2322. }
  2323. #endif
  2324. return 0;
  2325. }
  2326. static int mmc_power_off(struct mmc *mmc)
  2327. {
  2328. mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
  2329. #if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
  2330. if (mmc->vmmc_supply) {
  2331. int ret = regulator_set_enable(mmc->vmmc_supply, false);
  2332. if (ret) {
  2333. pr_debug("Error disabling VMMC supply\n");
  2334. return ret;
  2335. }
  2336. }
  2337. #endif
  2338. return 0;
  2339. }
  2340. static int mmc_power_cycle(struct mmc *mmc)
  2341. {
  2342. int ret;
  2343. ret = mmc_power_off(mmc);
  2344. if (ret)
  2345. return ret;
  2346. ret = mmc_host_power_cycle(mmc);
  2347. if (ret)
  2348. return ret;
  2349. /*
  2350. * SD spec recommends at least 1ms of delay. Let's wait for 2ms
  2351. * to be on the safer side.
  2352. */
  2353. udelay(2000);
  2354. return mmc_power_on(mmc);
  2355. }
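/*
 * Power up the card and run the operating-condition handshake: reset
 * with CMD0, probe for SD 2.0 via CMD8, then try ACMD41 (SD) and fall
 * back to CMD1 (MMC) on timeout. UHS modes are only attempted when the
 * host can do a full power cycle, since recovering from a failed UHS
 * switch requires one.
 */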
  2356. int mmc_get_op_cond(struct mmc *mmc)
  2357. {
  2358. bool uhs_en = supports_uhs(mmc->cfg->host_caps);
  2359. int err;
  2360. if (mmc->has_init)
  2361. return 0;
  2362. #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
  2363. mmc_adapter_card_type_ident();
  2364. #endif
  2365. err = mmc_power_init(mmc);
  2366. if (err)
  2367. return err;
  2368. #ifdef CONFIG_MMC_QUIRKS
  2369. mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
  2370. MMC_QUIRK_RETRY_SEND_CID |
  2371. MMC_QUIRK_RETRY_APP_CMD;
  2372. #endif
  2373. err = mmc_power_cycle(mmc);
  2374. if (err) {
  2375. /*
  2376. * if power cycling is not supported, we should not try
  2377. * to use the UHS modes, because we wouldn't be able to
  2378. * recover from an error during the UHS initialization.
  2379. */
  2380. pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
  2381. uhs_en = false;
  2382. mmc->host_caps &= ~UHS_CAPS;
  2383. err = mmc_power_on(mmc);
  2384. }
  2385. if (err)
  2386. return err;
  2387. #if CONFIG_IS_ENABLED(DM_MMC)
  2388. /* The device has already been probed ready for use */
  2389. #else
2390. /* mmc_start_init() already checked that ops->init is not NULL */
  2391. err = mmc->cfg->ops->init(mmc);
  2392. if (err)
  2393. return err;
  2394. #endif
  2395. mmc->ddr_mode = 0;
  2396. retry:
  2397. mmc_set_initial_state(mmc);
  2398. /* Reset the Card */
  2399. err = mmc_go_idle(mmc);
  2400. if (err)
  2401. return err;
2402. /* The internal partition resets to the user partition (0) on every CMD0 */
  2403. mmc_get_blk_desc(mmc)->hwpart = 0;
  2404. /* Test for SD version 2 */
  2405. err = mmc_send_if_cond(mmc);
  2406. /* Now try to get the SD card's operating condition */
  2407. err = sd_send_op_cond(mmc, uhs_en);
  2408. if (err && uhs_en) {
  2409. uhs_en = false;
  2410. mmc_power_cycle(mmc);
  2411. goto retry;
  2412. }
  2413. /* If the command timed out, we check for an MMC card */
  2414. if (err == -ETIMEDOUT) {
  2415. err = mmc_send_op_cond(mmc);
  2416. if (err) {
  2417. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  2418. pr_err("Card did not respond to voltage select!\n");
  2419. #endif
  2420. return -EOPNOTSUPP;
  2421. }
  2422. }
  2423. return err;
  2424. }
  2425. int mmc_start_init(struct mmc *mmc)
  2426. {
  2427. bool no_card;
  2428. int err = 0;
  2429. /*
  2430. * all hosts are capable of 1 bit bus-width and able to use the legacy
  2431. * timings.
  2432. */
2433. mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2434. MMC_MODE_1BIT;
  2435. #if CONFIG_IS_ENABLED(DM_MMC)
  2436. mmc_deferred_probe(mmc);
  2437. #endif
  2438. #if !defined(CONFIG_MMC_BROKEN_CD)
  2439. no_card = mmc_getcd(mmc) == 0;
  2440. #else
  2441. no_card = 0;
  2442. #endif
  2443. #if !CONFIG_IS_ENABLED(DM_MMC)
  2444. /* we pretend there's no card when init is NULL */
  2445. no_card = no_card || (mmc->cfg->ops->init == NULL);
  2446. #endif
  2447. if (no_card) {
  2448. mmc->has_init = 0;
  2449. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  2450. pr_err("MMC: no card present\n");
  2451. #endif
  2452. return -ENOMEDIUM;
  2453. }
  2454. err = mmc_get_op_cond(mmc);
  2455. if (!err)
  2456. mmc->init_in_progress = 1;
  2457. return err;
  2458. }
  2459. static int mmc_complete_init(struct mmc *mmc)
  2460. {
  2461. int err = 0;
  2462. mmc->init_in_progress = 0;
  2463. if (mmc->op_cond_pending)
  2464. err = mmc_complete_op_cond(mmc);
  2465. if (!err)
  2466. err = mmc_startup(mmc);
  2467. if (err)
  2468. mmc->has_init = 0;
  2469. else
  2470. mmc->has_init = 1;
  2471. return err;
  2472. }
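/*
 * Top-level init entry point. A minimal, illustrative call sequence
 * from board or command code (helper names as used elsewhere in U-Boot,
 * shown here only as a sketch) might look like:
 *
 *	struct mmc *mmc = find_mmc_device(dev_num);
 *
 *	if (mmc && !mmc_init(mmc))
 *		blk_dread(mmc_get_blk_desc(mmc), start, blkcnt, buf);
 */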
  2473. int mmc_init(struct mmc *mmc)
  2474. {
  2475. int err = 0;
  2476. __maybe_unused ulong start;
  2477. #if CONFIG_IS_ENABLED(DM_MMC)
  2478. struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
  2479. upriv->mmc = mmc;
  2480. #endif
  2481. if (mmc->has_init)
  2482. return 0;
  2483. start = get_timer(0);
  2484. if (!mmc->init_in_progress)
  2485. err = mmc_start_init(mmc);
  2486. if (!err)
  2487. err = mmc_complete_init(mmc);
  2488. if (err)
  2489. pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
  2490. return err;
  2491. }
  2492. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
  2493. CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
  2494. CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
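/*
 * Before handing the controller over (e.g. prior to booting an OS),
 * drop SD cards out of the UHS modes and eMMC out of HS200/HS400 by
 * re-running mode selection with those capabilities masked off.
 */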
  2495. int mmc_deinit(struct mmc *mmc)
  2496. {
  2497. u32 caps_filtered;
  2498. if (!mmc->has_init)
  2499. return 0;
  2500. if (IS_SD(mmc)) {
  2501. caps_filtered = mmc->card_caps &
  2502. ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
  2503. MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
  2504. MMC_CAP(UHS_SDR104));
  2505. return sd_select_mode_and_width(mmc, caps_filtered);
  2506. } else {
  2507. caps_filtered = mmc->card_caps &
  2508. ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
  2509. return mmc_select_mode_and_width(mmc, caps_filtered);
  2510. }
  2511. }
  2512. #endif
  2513. int mmc_set_dsr(struct mmc *mmc, u16 val)
  2514. {
  2515. mmc->dsr = val;
  2516. return 0;
  2517. }
  2518. /* CPU-specific MMC initializations */
  2519. __weak int cpu_mmc_init(bd_t *bis)
  2520. {
  2521. return -1;
  2522. }
  2523. /* board-specific MMC initializations. */
  2524. __weak int board_mmc_init(bd_t *bis)
  2525. {
  2526. return -1;
  2527. }
  2528. void mmc_set_preinit(struct mmc *mmc, int preinit)
  2529. {
  2530. mmc->preinit = preinit;
  2531. }
  2532. #if CONFIG_IS_ENABLED(DM_MMC)
  2533. static int mmc_probe(bd_t *bis)
  2534. {
  2535. int ret, i;
  2536. struct uclass *uc;
  2537. struct udevice *dev;
  2538. ret = uclass_get(UCLASS_MMC, &uc);
  2539. if (ret)
  2540. return ret;
  2541. /*
  2542. * Try to add them in sequence order. Really with driver model we
  2543. * should allow holes, but the current MMC list does not allow that.
  2544. * So if we request 0, 1, 3 we will get 0, 1, 2.
  2545. */
  2546. for (i = 0; ; i++) {
  2547. ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
  2548. if (ret == -ENODEV)
  2549. break;
  2550. }
  2551. uclass_foreach_dev(dev, uc) {
  2552. ret = device_probe(dev);
  2553. if (ret)
  2554. pr_err("%s - probe failed: %d\n", dev->name, ret);
  2555. }
  2556. return 0;
  2557. }
  2558. #else
  2559. static int mmc_probe(bd_t *bis)
  2560. {
  2561. if (board_mmc_init(bis) < 0)
  2562. cpu_mmc_init(bis);
  2563. return 0;
  2564. }
  2565. #endif
  2566. int mmc_initialize(bd_t *bis)
  2567. {
  2568. static int initialized = 0;
  2569. int ret;
  2570. if (initialized) /* Avoid initializing mmc multiple times */
  2571. return 0;
  2572. initialized = 1;
  2573. #if !CONFIG_IS_ENABLED(BLK)
  2574. #if !CONFIG_IS_ENABLED(MMC_TINY)
  2575. mmc_list_init();
  2576. #endif
  2577. #endif
  2578. ret = mmc_probe(bis);
  2579. if (ret)
  2580. return ret;
  2581. #ifndef CONFIG_SPL_BUILD
  2582. print_mmc_devices(',');
  2583. #endif
  2584. mmc_do_preinit();
  2585. return 0;
  2586. }
  2587. #if CONFIG_IS_ENABLED(DM_MMC)
  2588. int mmc_init_device(int num)
  2589. {
  2590. struct udevice *dev;
  2591. struct mmc *m;
  2592. int ret;
  2593. ret = uclass_get_device(UCLASS_MMC, num, &dev);
  2594. if (ret)
  2595. return ret;
  2596. m = mmc_get_mmc_dev(dev);
  2597. if (!m)
  2598. return 0;
  2599. #ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
  2600. mmc_set_preinit(m, 1);
  2601. #endif
  2602. if (m->preinit)
  2603. mmc_start_init(m);
  2604. return 0;
  2605. }
  2606. #endif
  2607. #ifdef CONFIG_CMD_BKOPS_ENABLE
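/*
 * Enable manual background operations by setting the BKOPS_EN byte in
 * EXT_CSD, after checking that the device reports BKOPS support and
 * that the bit is not already set.
 */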
  2608. int mmc_set_bkops_enable(struct mmc *mmc)
  2609. {
  2610. int err;
  2611. ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
  2612. err = mmc_send_ext_csd(mmc, ext_csd);
  2613. if (err) {
  2614. puts("Could not get ext_csd register values\n");
  2615. return err;
  2616. }
  2617. if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
  2618. puts("Background operations not supported on device\n");
  2619. return -EMEDIUMTYPE;
  2620. }
  2621. if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
  2622. puts("Background operations already enabled\n");
  2623. return 0;
  2624. }
  2625. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
  2626. if (err) {
  2627. puts("Failed to enable manual background operations\n");
  2628. return err;
  2629. }
  2630. puts("Enabled manual background operations\n");
  2631. return 0;
  2632. }
  2633. #endif