mmc.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2008, Freescale Semiconductor, Inc
  4. * Copyright 2020 NXP
  5. * Andy Fleming
  6. *
  7. * Based vaguely on the Linux code
  8. */
  9. #include <config.h>
  10. #include <common.h>
  11. #include <blk.h>
  12. #include <command.h>
  13. #include <dm.h>
  14. #include <log.h>
  15. #include <dm/device-internal.h>
  16. #include <errno.h>
  17. #include <mmc.h>
  18. #include <part.h>
  19. #include <linux/bitops.h>
  20. #include <linux/delay.h>
  21. #include <power/regulator.h>
  22. #include <malloc.h>
  23. #include <memalign.h>
  24. #include <linux/list.h>
  25. #include <div64.h>
  26. #include "mmc_private.h"
  27. #define DEFAULT_CMD6_TIMEOUT_MS 500
  28. static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
  29. #if !CONFIG_IS_ENABLED(DM_MMC)
  30. static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
  31. {
  32. return -ENOSYS;
  33. }
  34. __weak int board_mmc_getwp(struct mmc *mmc)
  35. {
  36. return -1;
  37. }
  38. int mmc_getwp(struct mmc *mmc)
  39. {
  40. int wp;
  41. wp = board_mmc_getwp(mmc);
  42. if (wp < 0) {
  43. if (mmc->cfg->ops->getwp)
  44. wp = mmc->cfg->ops->getwp(mmc);
  45. else
  46. wp = 0;
  47. }
  48. return wp;
  49. }
  50. __weak int board_mmc_getcd(struct mmc *mmc)
  51. {
  52. return -1;
  53. }
  54. #endif
  55. #ifdef CONFIG_MMC_TRACE
  56. void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
  57. {
  58. printf("CMD_SEND:%d\n", cmd->cmdidx);
  59. printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
  60. }
  61. void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
  62. {
  63. int i;
  64. u8 *ptr;
  65. if (ret) {
  66. printf("\t\tRET\t\t\t %d\n", ret);
  67. } else {
  68. switch (cmd->resp_type) {
  69. case MMC_RSP_NONE:
  70. printf("\t\tMMC_RSP_NONE\n");
  71. break;
  72. case MMC_RSP_R1:
  73. printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
  74. cmd->response[0]);
  75. break;
  76. case MMC_RSP_R1b:
  77. printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
  78. cmd->response[0]);
  79. break;
  80. case MMC_RSP_R2:
  81. printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
  82. cmd->response[0]);
  83. printf("\t\t \t\t 0x%08x \n",
  84. cmd->response[1]);
  85. printf("\t\t \t\t 0x%08x \n",
  86. cmd->response[2]);
  87. printf("\t\t \t\t 0x%08x \n",
  88. cmd->response[3]);
  89. printf("\n");
  90. printf("\t\t\t\t\tDUMPING DATA\n");
  91. for (i = 0; i < 4; i++) {
  92. int j;
  93. printf("\t\t\t\t\t%03d - ", i*4);
  94. ptr = (u8 *)&cmd->response[i];
  95. ptr += 3;
  96. for (j = 0; j < 4; j++)
  97. printf("%02x ", *ptr--);
  98. printf("\n");
  99. }
  100. break;
  101. case MMC_RSP_R3:
  102. printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
  103. cmd->response[0]);
  104. break;
  105. default:
  106. printf("\t\tERROR MMC rsp not supported\n");
  107. break;
  108. }
  109. }
  110. }
  111. void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
  112. {
  113. int status;
  114. status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
  115. printf("CURR STATE:%d\n", status);
  116. }
  117. #endif
  118. #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
  119. const char *mmc_mode_name(enum bus_mode mode)
  120. {
  121. static const char *const names[] = {
  122. [MMC_LEGACY] = "MMC legacy",
  123. [MMC_HS] = "MMC High Speed (26MHz)",
  124. [SD_HS] = "SD High Speed (50MHz)",
  125. [UHS_SDR12] = "UHS SDR12 (25MHz)",
  126. [UHS_SDR25] = "UHS SDR25 (50MHz)",
  127. [UHS_SDR50] = "UHS SDR50 (100MHz)",
  128. [UHS_SDR104] = "UHS SDR104 (208MHz)",
  129. [UHS_DDR50] = "UHS DDR50 (50MHz)",
  130. [MMC_HS_52] = "MMC High Speed (52MHz)",
  131. [MMC_DDR_52] = "MMC DDR52 (52MHz)",
  132. [MMC_HS_200] = "HS200 (200MHz)",
  133. [MMC_HS_400] = "HS400 (200MHz)",
  134. [MMC_HS_400_ES] = "HS400ES (200MHz)",
  135. };
  136. if (mode >= MMC_MODES_END)
  137. return "Unknown mode";
  138. else
  139. return names[mode];
  140. }
  141. #endif
  142. static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
  143. {
  144. static const int freqs[] = {
  145. [MMC_LEGACY] = 25000000,
  146. [MMC_HS] = 26000000,
  147. [SD_HS] = 50000000,
  148. [MMC_HS_52] = 52000000,
  149. [MMC_DDR_52] = 52000000,
  150. [UHS_SDR12] = 25000000,
  151. [UHS_SDR25] = 50000000,
  152. [UHS_SDR50] = 100000000,
  153. [UHS_DDR50] = 50000000,
  154. [UHS_SDR104] = 208000000,
  155. [MMC_HS_200] = 200000000,
  156. [MMC_HS_400] = 200000000,
  157. [MMC_HS_400_ES] = 200000000,
  158. };
  159. if (mode == MMC_LEGACY)
  160. return mmc->legacy_speed;
  161. else if (mode >= MMC_MODES_END)
  162. return 0;
  163. else
  164. return freqs[mode];
  165. }
  166. static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
  167. {
  168. mmc->selected_mode = mode;
  169. mmc->tran_speed = mmc_mode2freq(mmc, mode);
  170. mmc->ddr_mode = mmc_is_mode_ddr(mode);
  171. pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
  172. mmc->tran_speed / 1000000);
  173. return 0;
  174. }
  175. #if !CONFIG_IS_ENABLED(DM_MMC)
  176. int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
  177. {
  178. int ret;
  179. mmmc_trace_before_send(mmc, cmd);
  180. ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
  181. mmmc_trace_after_send(mmc, cmd, ret);
  182. return ret;
  183. }
  184. #endif
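  /* Read the card status register with CMD13, retrying up to five times on error */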
  185. int mmc_send_status(struct mmc *mmc, unsigned int *status)
  186. {
  187. struct mmc_cmd cmd;
  188. int err, retries = 5;
  189. cmd.cmdidx = MMC_CMD_SEND_STATUS;
  190. cmd.resp_type = MMC_RSP_R1;
  191. if (!mmc_host_is_spi(mmc))
  192. cmd.cmdarg = mmc->rca << 16;
  193. while (retries--) {
  194. err = mmc_send_cmd(mmc, &cmd, NULL);
  195. if (!err) {
  196. mmc_trace_state(mmc, &cmd);
  197. *status = cmd.response[0];
  198. return 0;
  199. }
  200. }
  201. mmc_trace_state(mmc, &cmd);
  202. return -ECOMM;
  203. }
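  /*
   * Wait for the card to leave the programming state: poll DAT0 if the host
   * supports it, otherwise fall back to CMD13 status polling until
   * timeout_ms expires.
   */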
  204. int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
  205. {
  206. unsigned int status;
  207. int err;
  208. err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
  209. if (err != -ENOSYS)
  210. return err;
  211. while (1) {
  212. err = mmc_send_status(mmc, &status);
  213. if (err)
  214. return err;
  215. if ((status & MMC_STATUS_RDY_FOR_DATA) &&
  216. (status & MMC_STATUS_CURR_STATE) !=
  217. MMC_STATE_PRG)
  218. break;
  219. if (status & MMC_STATUS_MASK) {
  220. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  221. pr_err("Status Error: 0x%08x\n", status);
  222. #endif
  223. return -ECOMM;
  224. }
  225. if (timeout_ms-- <= 0)
  226. break;
  227. udelay(1000);
  228. }
  229. if (timeout_ms <= 0) {
  230. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  231. pr_err("Timeout waiting card ready\n");
  232. #endif
  233. return -ETIMEDOUT;
  234. }
  235. return 0;
  236. }
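  /* Set the transfer block length with CMD16; a no-op in DDR modes, where the block length is fixed */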
  237. int mmc_set_blocklen(struct mmc *mmc, int len)
  238. {
  239. struct mmc_cmd cmd;
  240. int err;
  241. if (mmc->ddr_mode)
  242. return 0;
  243. cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
  244. cmd.resp_type = MMC_RSP_R1;
  245. cmd.cmdarg = len;
  246. err = mmc_send_cmd(mmc, &cmd, NULL);
  247. #ifdef CONFIG_MMC_QUIRKS
  248. if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
  249. int retries = 4;
  250. /*
  251. * It has been seen that SET_BLOCKLEN may fail on the first
  252. * attempt, let's try a few more times
  253. */
  254. do {
  255. err = mmc_send_cmd(mmc, &cmd, NULL);
  256. if (!err)
  257. break;
  258. } while (retries--);
  259. }
  260. #endif
  261. return err;
  262. }
  263. #ifdef MMC_SUPPORTS_TUNING
  264. static const u8 tuning_blk_pattern_4bit[] = {
  265. 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
  266. 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
  267. 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
  268. 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
  269. 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
  270. 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
  271. 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
  272. 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
  273. };
  274. static const u8 tuning_blk_pattern_8bit[] = {
  275. 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
  276. 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
  277. 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
  278. 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
  279. 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
  280. 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
  281. 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
  282. 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
  283. 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
  284. 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
  285. 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
  286. 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
  287. 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
  288. 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
  289. 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
  290. 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
  291. };
  292. int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
  293. {
  294. struct mmc_cmd cmd;
  295. struct mmc_data data;
  296. const u8 *tuning_block_pattern;
  297. int size, err;
  298. if (mmc->bus_width == 8) {
  299. tuning_block_pattern = tuning_blk_pattern_8bit;
  300. size = sizeof(tuning_blk_pattern_8bit);
  301. } else if (mmc->bus_width == 4) {
  302. tuning_block_pattern = tuning_blk_pattern_4bit;
  303. size = sizeof(tuning_blk_pattern_4bit);
  304. } else {
  305. return -EINVAL;
  306. }
  307. ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
  308. cmd.cmdidx = opcode;
  309. cmd.cmdarg = 0;
  310. cmd.resp_type = MMC_RSP_R1;
  311. data.dest = (void *)data_buf;
  312. data.blocks = 1;
  313. data.blocksize = size;
  314. data.flags = MMC_DATA_READ;
  315. err = mmc_send_cmd(mmc, &cmd, &data);
  316. if (err)
  317. return err;
  318. if (memcmp(data_buf, tuning_block_pattern, size))
  319. return -EIO;
  320. return 0;
  321. }
  322. #endif
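  /*
   * Read blkcnt blocks starting at 'start' using CMD17/CMD18 (plus CMD12 to
   * stop a multiple-block transfer); returns the number of blocks read, or 0
   * on error.
   */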
  323. static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
  324. lbaint_t blkcnt)
  325. {
  326. struct mmc_cmd cmd;
  327. struct mmc_data data;
  328. if (blkcnt > 1)
  329. cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
  330. else
  331. cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
  332. if (mmc->high_capacity)
  333. cmd.cmdarg = start;
  334. else
  335. cmd.cmdarg = start * mmc->read_bl_len;
  336. cmd.resp_type = MMC_RSP_R1;
  337. data.dest = dst;
  338. data.blocks = blkcnt;
  339. data.blocksize = mmc->read_bl_len;
  340. data.flags = MMC_DATA_READ;
  341. if (mmc_send_cmd(mmc, &cmd, &data))
  342. return 0;
  343. if (blkcnt > 1) {
  344. cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
  345. cmd.cmdarg = 0;
  346. cmd.resp_type = MMC_RSP_R1b;
  347. if (mmc_send_cmd(mmc, &cmd, NULL)) {
  348. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  349. pr_err("mmc fail to send stop cmd\n");
  350. #endif
  351. return 0;
  352. }
  353. }
  354. return blkcnt;
  355. }
  356. #if !CONFIG_IS_ENABLED(DM_MMC)
  357. static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
  358. {
  359. if (mmc->cfg->ops->get_b_max)
  360. return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
  361. else
  362. return mmc->cfg->b_max;
  363. }
  364. #endif
  365. #if CONFIG_IS_ENABLED(BLK)
  366. ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
  367. #else
  368. ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
  369. void *dst)
  370. #endif
  371. {
  372. #if CONFIG_IS_ENABLED(BLK)
  373. struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
  374. #endif
  375. int dev_num = block_dev->devnum;
  376. int err;
  377. lbaint_t cur, blocks_todo = blkcnt;
  378. uint b_max;
  379. if (blkcnt == 0)
  380. return 0;
  381. struct mmc *mmc = find_mmc_device(dev_num);
  382. if (!mmc)
  383. return 0;
  384. if (CONFIG_IS_ENABLED(MMC_TINY))
  385. err = mmc_switch_part(mmc, block_dev->hwpart);
  386. else
  387. err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
  388. if (err < 0)
  389. return 0;
  390. if ((start + blkcnt) > block_dev->lba) {
  391. #if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
  392. pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
  393. start + blkcnt, block_dev->lba);
  394. #endif
  395. return 0;
  396. }
  397. if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
  398. pr_debug("%s: Failed to set blocklen\n", __func__);
  399. return 0;
  400. }
  401. b_max = mmc_get_b_max(mmc, dst, blkcnt);
  402. do {
  403. cur = (blocks_todo > b_max) ? b_max : blocks_todo;
  404. if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
  405. pr_debug("%s: Failed to read blocks\n", __func__);
  406. return 0;
  407. }
  408. blocks_todo -= cur;
  409. start += cur;
  410. dst += cur * mmc->read_bl_len;
  411. } while (blocks_todo > 0);
  412. return blkcnt;
  413. }
  414. static int mmc_go_idle(struct mmc *mmc)
  415. {
  416. struct mmc_cmd cmd;
  417. int err;
  418. udelay(1000);
  419. cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
  420. cmd.cmdarg = 0;
  421. cmd.resp_type = MMC_RSP_NONE;
  422. err = mmc_send_cmd(mmc, &cmd, NULL);
  423. if (err)
  424. return err;
  425. udelay(2000);
  426. return 0;
  427. }
  428. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  429. static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
  430. {
  431. struct mmc_cmd cmd;
  432. int err = 0;
  433. /*
  434. * Send CMD11 only if the request is to switch the card to
  435. * 1.8V signalling.
  436. */
  437. if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
  438. return mmc_set_signal_voltage(mmc, signal_voltage);
  439. cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
  440. cmd.cmdarg = 0;
  441. cmd.resp_type = MMC_RSP_R1;
  442. err = mmc_send_cmd(mmc, &cmd, NULL);
  443. if (err)
  444. return err;
  445. if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
  446. return -EIO;
  447. /*
  448. * The card should drive cmd and dat[0:3] low immediately
  449. * after the response of cmd11, but wait 100 us to be sure
  450. */
  451. err = mmc_wait_dat0(mmc, 0, 100);
  452. if (err == -ENOSYS)
  453. udelay(100);
  454. else if (err)
  455. return -ETIMEDOUT;
  456. /*
  457. * During a signal voltage level switch, the clock must be gated
  458. * for 5 ms according to the SD spec
  459. */
  460. mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
  461. err = mmc_set_signal_voltage(mmc, signal_voltage);
  462. if (err)
  463. return err;
  464. /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
  465. mdelay(10);
  466. mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
  467. /*
  468. * Failure to switch is indicated by the card holding
  469. * dat[0:3] low. Wait for at least 1 ms according to spec
  470. */
  471. err = mmc_wait_dat0(mmc, 1, 1000);
  472. if (err == -ENOSYS)
  473. udelay(1000);
  474. else if (err)
  475. return -ETIMEDOUT;
  476. return 0;
  477. }
  478. #endif
  479. static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
  480. {
  481. int timeout = 1000;
  482. int err;
  483. struct mmc_cmd cmd;
  484. while (1) {
  485. cmd.cmdidx = MMC_CMD_APP_CMD;
  486. cmd.resp_type = MMC_RSP_R1;
  487. cmd.cmdarg = 0;
  488. err = mmc_send_cmd(mmc, &cmd, NULL);
  489. if (err)
  490. return err;
  491. cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
  492. cmd.resp_type = MMC_RSP_R3;
  493. /*
  494. * Most cards do not answer if some reserved bits
  495. * in the ocr are set. However, some controllers
  496. * can set bit 7 (reserved for low voltages), but
  497. * how to manage low-voltage SD cards is not yet
  498. * specified.
  499. */
  500. cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
  501. (mmc->cfg->voltages & 0xff8000);
  502. if (mmc->version == SD_VERSION_2)
  503. cmd.cmdarg |= OCR_HCS;
  504. if (uhs_en)
  505. cmd.cmdarg |= OCR_S18R;
  506. err = mmc_send_cmd(mmc, &cmd, NULL);
  507. if (err)
  508. return err;
  509. if (cmd.response[0] & OCR_BUSY)
  510. break;
  511. if (timeout-- <= 0)
  512. return -EOPNOTSUPP;
  513. udelay(1000);
  514. }
  515. if (mmc->version != SD_VERSION_2)
  516. mmc->version = SD_VERSION_1_0;
  517. if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
  518. cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
  519. cmd.resp_type = MMC_RSP_R3;
  520. cmd.cmdarg = 0;
  521. err = mmc_send_cmd(mmc, &cmd, NULL);
  522. if (err)
  523. return err;
  524. }
  525. mmc->ocr = cmd.response[0];
  526. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  527. if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
  528. == 0x41000000) {
  529. err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
  530. if (err)
  531. return err;
  532. }
  533. #endif
  534. mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
  535. mmc->rca = 0;
  536. return 0;
  537. }
  538. static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
  539. {
  540. struct mmc_cmd cmd;
  541. int err;
  542. cmd.cmdidx = MMC_CMD_SEND_OP_COND;
  543. cmd.resp_type = MMC_RSP_R3;
  544. cmd.cmdarg = 0;
  545. if (use_arg && !mmc_host_is_spi(mmc))
  546. cmd.cmdarg = OCR_HCS |
  547. (mmc->cfg->voltages &
  548. (mmc->ocr & OCR_VOLTAGE_MASK)) |
  549. (mmc->ocr & OCR_ACCESS_MODE);
  550. err = mmc_send_cmd(mmc, &cmd, NULL);
  551. if (err)
  552. return err;
  553. mmc->ocr = cmd.response[0];
  554. return 0;
  555. }
  556. static int mmc_send_op_cond(struct mmc *mmc)
  557. {
  558. int err, i;
  559. int timeout = 1000;
  560. uint start;
  561. /* Some cards seem to need this */
  562. mmc_go_idle(mmc);
  563. start = get_timer(0);
  564. /* Ask the card for its capabilities */
  565. for (i = 0; ; i++) {
  566. err = mmc_send_op_cond_iter(mmc, i != 0);
  567. if (err)
  568. return err;
  569. /* exit if not busy (flag seems to be inverted) */
  570. if (mmc->ocr & OCR_BUSY)
  571. break;
  572. if (get_timer(start) > timeout)
  573. return -ETIMEDOUT;
  574. udelay(100);
  575. }
  576. mmc->op_cond_pending = 1;
  577. return 0;
  578. }
  579. static int mmc_complete_op_cond(struct mmc *mmc)
  580. {
  581. struct mmc_cmd cmd;
  582. int timeout = 1000;
  583. ulong start;
  584. int err;
  585. mmc->op_cond_pending = 0;
  586. if (!(mmc->ocr & OCR_BUSY)) {
  587. /* Some cards seem to need this */
  588. mmc_go_idle(mmc);
  589. start = get_timer(0);
  590. while (1) {
  591. err = mmc_send_op_cond_iter(mmc, 1);
  592. if (err)
  593. return err;
  594. if (mmc->ocr & OCR_BUSY)
  595. break;
  596. if (get_timer(start) > timeout)
  597. return -EOPNOTSUPP;
  598. udelay(100);
  599. }
  600. }
  601. if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
  602. cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
  603. cmd.resp_type = MMC_RSP_R3;
  604. cmd.cmdarg = 0;
  605. err = mmc_send_cmd(mmc, &cmd, NULL);
  606. if (err)
  607. return err;
  608. mmc->ocr = cmd.response[0];
  609. }
  610. mmc->version = MMC_VERSION_UNKNOWN;
  611. mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
  612. mmc->rca = 1;
  613. return 0;
  614. }
  615. int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
  616. {
  617. struct mmc_cmd cmd;
  618. struct mmc_data data;
  619. int err;
  620. /* Read the Extended CSD register */
  621. cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
  622. cmd.resp_type = MMC_RSP_R1;
  623. cmd.cmdarg = 0;
  624. data.dest = (char *)ext_csd;
  625. data.blocks = 1;
  626. data.blocksize = MMC_MAX_BLOCK_LEN;
  627. data.flags = MMC_DATA_READ;
  628. err = mmc_send_cmd(mmc, &cmd, &data);
  629. return err;
  630. }
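  /*
   * Write a single EXT_CSD byte with CMD6, then wait for the card to exit
   * its busy state, either via DAT0 or by polling the status with CMD13.
   */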
  631. static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
  632. bool send_status)
  633. {
  634. unsigned int status, start;
  635. struct mmc_cmd cmd;
  636. int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
  637. bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
  638. (index == EXT_CSD_PART_CONF);
  639. int retries = 3;
  640. int ret;
  641. if (mmc->gen_cmd6_time)
  642. timeout_ms = mmc->gen_cmd6_time * 10;
  643. if (is_part_switch && mmc->part_switch_time)
  644. timeout_ms = mmc->part_switch_time * 10;
  645. cmd.cmdidx = MMC_CMD_SWITCH;
  646. cmd.resp_type = MMC_RSP_R1b;
  647. cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
  648. (index << 16) |
  649. (value << 8);
  650. do {
  651. ret = mmc_send_cmd(mmc, &cmd, NULL);
  652. } while (ret && retries-- > 0);
  653. if (ret)
  654. return ret;
  655. start = get_timer(0);
  656. /* poll dat0 for rdy/busy status */
  657. ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
  658. if (ret && ret != -ENOSYS)
  659. return ret;
  660. /*
  661. * If we are not allowed to poll by using CMD13, or are not capable of
  662. * polling by using mmc_wait_dat0, rely on waiting the stated timeout
  663. * to be sufficient.
  664. */
  665. if (ret == -ENOSYS && !send_status)
  666. mdelay(timeout_ms);
  667. /* Finally wait until the card is ready or indicates a failure
  668. * to switch. It doesn't hurt to use CMD13 here even if send_status
  669. * is false, because by now (after 'timeout_ms' ms) the bus should be
  670. * reliable.
  671. */
  672. do {
  673. ret = mmc_send_status(mmc, &status);
  674. if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
  675. pr_debug("switch failed %d/%d/0x%x !\n", set, index,
  676. value);
  677. return -EIO;
  678. }
  679. if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
  680. return 0;
  681. udelay(100);
  682. } while (get_timer(start) < timeout_ms);
  683. return -ETIMEDOUT;
  684. }
  685. int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
  686. {
  687. return __mmc_switch(mmc, set, index, value, true);
  688. }
  689. int mmc_boot_wp(struct mmc *mmc)
  690. {
  691. return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
  692. }
  693. #if !CONFIG_IS_ENABLED(MMC_TINY)
  694. static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
  695. bool hsdowngrade)
  696. {
  697. int err;
  698. int speed_bits;
  699. ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
  700. switch (mode) {
  701. case MMC_HS:
  702. case MMC_HS_52:
  703. case MMC_DDR_52:
  704. speed_bits = EXT_CSD_TIMING_HS;
  705. break;
  706. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
  707. case MMC_HS_200:
  708. speed_bits = EXT_CSD_TIMING_HS200;
  709. break;
  710. #endif
  711. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
  712. case MMC_HS_400:
  713. speed_bits = EXT_CSD_TIMING_HS400;
  714. break;
  715. #endif
  716. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  717. case MMC_HS_400_ES:
  718. speed_bits = EXT_CSD_TIMING_HS400;
  719. break;
  720. #endif
  721. case MMC_LEGACY:
  722. speed_bits = EXT_CSD_TIMING_LEGACY;
  723. break;
  724. default:
  725. return -EINVAL;
  726. }
  727. err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
  728. speed_bits, !hsdowngrade);
  729. if (err)
  730. return err;
  731. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
  732. CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
  733. /*
  734. * In case the eMMC is in HS200/HS400 mode and we are downgrading
  735. * to HS mode, the card clock is still running much faster than
  736. * the supported HS mode clock, so we cannot reliably read out
  737. * Extended CSD. Reconfigure the controller to run at HS mode.
  738. */
  739. if (hsdowngrade) {
  740. mmc_select_mode(mmc, MMC_HS);
  741. mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
  742. }
  743. #endif
  744. if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
  745. /* Now check to see that it worked */
  746. err = mmc_send_ext_csd(mmc, test_csd);
  747. if (err)
  748. return err;
  749. /* No high-speed support */
  750. if (!test_csd[EXT_CSD_HS_TIMING])
  751. return -ENOTSUPP;
  752. }
  753. return 0;
  754. }
  755. static int mmc_get_capabilities(struct mmc *mmc)
  756. {
  757. u8 *ext_csd = mmc->ext_csd;
  758. char cardtype;
  759. mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
  760. if (mmc_host_is_spi(mmc))
  761. return 0;
  762. /* Only version 4 supports high-speed */
  763. if (mmc->version < MMC_VERSION_4)
  764. return 0;
  765. if (!ext_csd) {
  766. pr_err("No ext_csd found!\n"); /* this should never happen */
  767. return -ENOTSUPP;
  768. }
  769. mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
  770. cardtype = ext_csd[EXT_CSD_CARD_TYPE];
  771. mmc->cardtype = cardtype;
  772. #if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
  773. if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
  774. EXT_CSD_CARD_TYPE_HS200_1_8V)) {
  775. mmc->card_caps |= MMC_MODE_HS200;
  776. }
  777. #endif
  778. #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
  779. CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  780. if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
  781. EXT_CSD_CARD_TYPE_HS400_1_8V)) {
  782. mmc->card_caps |= MMC_MODE_HS400;
  783. }
  784. #endif
  785. if (cardtype & EXT_CSD_CARD_TYPE_52) {
  786. if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
  787. mmc->card_caps |= MMC_MODE_DDR_52MHz;
  788. mmc->card_caps |= MMC_MODE_HS_52MHz;
  789. }
  790. if (cardtype & EXT_CSD_CARD_TYPE_26)
  791. mmc->card_caps |= MMC_MODE_HS;
  792. #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
  793. if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
  794. (mmc->card_caps & MMC_MODE_HS400)) {
  795. mmc->card_caps |= MMC_MODE_HS400_ES;
  796. }
  797. #endif
  798. return 0;
  799. }
  800. #endif
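  /* Update mmc->capacity and the block descriptor's lba to match the selected hardware partition */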
  801. static int mmc_set_capacity(struct mmc *mmc, int part_num)
  802. {
  803. switch (part_num) {
  804. case 0:
  805. mmc->capacity = mmc->capacity_user;
  806. break;
  807. case 1:
  808. case 2:
  809. mmc->capacity = mmc->capacity_boot;
  810. break;
  811. case 3:
  812. mmc->capacity = mmc->capacity_rpmb;
  813. break;
  814. case 4:
  815. case 5:
  816. case 6:
  817. case 7:
  818. mmc->capacity = mmc->capacity_gp[part_num - 4];
  819. break;
  820. default:
  821. return -1;
  822. }
  823. mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
  824. return 0;
  825. }
  826. int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
  827. {
  828. int ret;
  829. int retry = 3;
  830. do {
  831. ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  832. EXT_CSD_PART_CONF,
  833. (mmc->part_config & ~PART_ACCESS_MASK)
  834. | (part_num & PART_ACCESS_MASK));
  835. } while (ret && retry--);
  836. /*
  837. * Set the capacity if the switch succeeded or was intended
  838. * to return to representing the raw device.
  839. */
  840. if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
  841. ret = mmc_set_capacity(mmc, part_num);
  842. mmc_get_blk_desc(mmc)->hwpart = part_num;
  843. }
  844. return ret;
  845. }
  846. #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
  847. int mmc_hwpart_config(struct mmc *mmc,
  848. const struct mmc_hwpart_conf *conf,
  849. enum mmc_hwpart_conf_mode mode)
  850. {
  851. u8 part_attrs = 0;
  852. u32 enh_size_mult;
  853. u32 enh_start_addr;
  854. u32 gp_size_mult[4];
  855. u32 max_enh_size_mult;
  856. u32 tot_enh_size_mult = 0;
  857. u8 wr_rel_set;
  858. int i, pidx, err;
  859. ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
  860. if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
  861. return -EINVAL;
  862. if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
  863. pr_err("eMMC >= 4.4 required for enhanced user data area\n");
  864. return -EMEDIUMTYPE;
  865. }
  866. if (!(mmc->part_support & PART_SUPPORT)) {
  867. pr_err("Card does not support partitioning\n");
  868. return -EMEDIUMTYPE;
  869. }
  870. if (!mmc->hc_wp_grp_size) {
  871. pr_err("Card does not define HC WP group size\n");
  872. return -EMEDIUMTYPE;
  873. }
  874. /* check partition alignment and total enhanced size */
  875. if (conf->user.enh_size) {
  876. if (conf->user.enh_size % mmc->hc_wp_grp_size ||
  877. conf->user.enh_start % mmc->hc_wp_grp_size) {
  878. pr_err("User data enhanced area not HC WP group "
  879. "size aligned\n");
  880. return -EINVAL;
  881. }
  882. part_attrs |= EXT_CSD_ENH_USR;
  883. enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
  884. if (mmc->high_capacity) {
  885. enh_start_addr = conf->user.enh_start;
  886. } else {
  887. enh_start_addr = (conf->user.enh_start << 9);
  888. }
  889. } else {
  890. enh_size_mult = 0;
  891. enh_start_addr = 0;
  892. }
  893. tot_enh_size_mult += enh_size_mult;
  894. for (pidx = 0; pidx < 4; pidx++) {
  895. if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
  896. pr_err("GP%i partition not HC WP group size "
  897. "aligned\n", pidx+1);
  898. return -EINVAL;
  899. }
  900. gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
  901. if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
  902. part_attrs |= EXT_CSD_ENH_GP(pidx);
  903. tot_enh_size_mult += gp_size_mult[pidx];
  904. }
  905. }
  906. if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
  907. pr_err("Card does not support enhanced attribute\n");
  908. return -EMEDIUMTYPE;
  909. }
  910. err = mmc_send_ext_csd(mmc, ext_csd);
  911. if (err)
  912. return err;
  913. max_enh_size_mult =
  914. (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
  915. (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
  916. ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
  917. if (tot_enh_size_mult > max_enh_size_mult) {
  918. pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
  919. tot_enh_size_mult, max_enh_size_mult);
  920. return -EMEDIUMTYPE;
  921. }
  922. /* The default value of EXT_CSD_WR_REL_SET is device
  923. * dependent; the values can only be changed if the
  924. * EXT_CSD_HS_CTRL_REL bit is set. The values can be
  925. * changed only once and before partitioning is completed. */
  926. wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
  927. if (conf->user.wr_rel_change) {
  928. if (conf->user.wr_rel_set)
  929. wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
  930. else
  931. wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
  932. }
  933. for (pidx = 0; pidx < 4; pidx++) {
  934. if (conf->gp_part[pidx].wr_rel_change) {
  935. if (conf->gp_part[pidx].wr_rel_set)
  936. wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
  937. else
  938. wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
  939. }
  940. }
  941. if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
  942. !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
  943. puts("Card does not support host controlled partition write "
  944. "reliability settings\n");
  945. return -EMEDIUMTYPE;
  946. }
  947. if (ext_csd[EXT_CSD_PARTITION_SETTING] &
  948. EXT_CSD_PARTITION_SETTING_COMPLETED) {
  949. pr_err("Card already partitioned\n");
  950. return -EPERM;
  951. }
  952. if (mode == MMC_HWPART_CONF_CHECK)
  953. return 0;
  954. /* Partitioning requires high-capacity size definitions */
  955. if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
  956. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  957. EXT_CSD_ERASE_GROUP_DEF, 1);
  958. if (err)
  959. return err;
  960. ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
  961. #if CONFIG_IS_ENABLED(MMC_WRITE)
  962. /* update erase group size to be high-capacity */
  963. mmc->erase_grp_size =
  964. ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
  965. #endif
  966. }
  967. /* all OK, write the configuration */
  968. for (i = 0; i < 4; i++) {
  969. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  970. EXT_CSD_ENH_START_ADDR+i,
  971. (enh_start_addr >> (i*8)) & 0xFF);
  972. if (err)
  973. return err;
  974. }
  975. for (i = 0; i < 3; i++) {
  976. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  977. EXT_CSD_ENH_SIZE_MULT+i,
  978. (enh_size_mult >> (i*8)) & 0xFF);
  979. if (err)
  980. return err;
  981. }
  982. for (pidx = 0; pidx < 4; pidx++) {
  983. for (i = 0; i < 3; i++) {
  984. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  985. EXT_CSD_GP_SIZE_MULT+pidx*3+i,
  986. (gp_size_mult[pidx] >> (i*8)) & 0xFF);
  987. if (err)
  988. return err;
  989. }
  990. }
  991. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  992. EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
  993. if (err)
  994. return err;
  995. if (mode == MMC_HWPART_CONF_SET)
  996. return 0;
  997. /* The WR_REL_SET is a write-once register but shall be
  998. * written before setting PART_SETTING_COMPLETED. As it is
  999. * write-once we can only write it when completing the
  1000. * partitioning. */
  1001. if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
  1002. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1003. EXT_CSD_WR_REL_SET, wr_rel_set);
  1004. if (err)
  1005. return err;
  1006. }
  1007. /* Setting PART_SETTING_COMPLETED confirms the partition
  1008. * configuration but it only becomes effective after power
  1009. * cycle, so we do not adjust the partition related settings
  1010. * in the mmc struct. */
  1011. err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
  1012. EXT_CSD_PARTITION_SETTING,
  1013. EXT_CSD_PARTITION_SETTING_COMPLETED);
  1014. if (err)
  1015. return err;
  1016. return 0;
  1017. }
  1018. #endif
  1019. #if !CONFIG_IS_ENABLED(DM_MMC)
  1020. int mmc_getcd(struct mmc *mmc)
  1021. {
  1022. int cd;
  1023. cd = board_mmc_getcd(mmc);
  1024. if (cd < 0) {
  1025. if (mmc->cfg->ops->getcd)
  1026. cd = mmc->cfg->ops->getcd(mmc);
  1027. else
  1028. cd = 1;
  1029. }
  1030. return cd;
  1031. }
  1032. #endif
  1033. #if !CONFIG_IS_ENABLED(MMC_TINY)
  1034. static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
  1035. {
  1036. struct mmc_cmd cmd;
  1037. struct mmc_data data;
  1038. /* Switch the frequency */
  1039. cmd.cmdidx = SD_CMD_SWITCH_FUNC;
  1040. cmd.resp_type = MMC_RSP_R1;
  1041. cmd.cmdarg = (mode << 31) | 0xffffff;
  1042. cmd.cmdarg &= ~(0xf << (group * 4));
  1043. cmd.cmdarg |= value << (group * 4);
  1044. data.dest = (char *)resp;
  1045. data.blocksize = 64;
  1046. data.blocks = 1;
  1047. data.flags = MMC_DATA_READ;
  1048. return mmc_send_cmd(mmc, &cmd, &data);
  1049. }
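  /*
   * Read the SCR and the switch-function status to determine the card's
   * bus-width, high-speed and (where enabled) UHS capabilities.
   */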
  1050. static int sd_get_capabilities(struct mmc *mmc)
  1051. {
  1052. int err;
  1053. struct mmc_cmd cmd;
  1054. ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
  1055. ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
  1056. struct mmc_data data;
  1057. int timeout;
  1058. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1059. u32 sd3_bus_mode;
  1060. #endif
  1061. mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
  1062. if (mmc_host_is_spi(mmc))
  1063. return 0;
  1064. /* Read the SCR to find out if this card supports higher speeds */
  1065. cmd.cmdidx = MMC_CMD_APP_CMD;
  1066. cmd.resp_type = MMC_RSP_R1;
  1067. cmd.cmdarg = mmc->rca << 16;
  1068. err = mmc_send_cmd(mmc, &cmd, NULL);
  1069. if (err)
  1070. return err;
  1071. cmd.cmdidx = SD_CMD_APP_SEND_SCR;
  1072. cmd.resp_type = MMC_RSP_R1;
  1073. cmd.cmdarg = 0;
  1074. timeout = 3;
  1075. retry_scr:
  1076. data.dest = (char *)scr;
  1077. data.blocksize = 8;
  1078. data.blocks = 1;
  1079. data.flags = MMC_DATA_READ;
  1080. err = mmc_send_cmd(mmc, &cmd, &data);
  1081. if (err) {
  1082. if (timeout--)
  1083. goto retry_scr;
  1084. return err;
  1085. }
  1086. mmc->scr[0] = __be32_to_cpu(scr[0]);
  1087. mmc->scr[1] = __be32_to_cpu(scr[1]);
  1088. switch ((mmc->scr[0] >> 24) & 0xf) {
  1089. case 0:
  1090. mmc->version = SD_VERSION_1_0;
  1091. break;
  1092. case 1:
  1093. mmc->version = SD_VERSION_1_10;
  1094. break;
  1095. case 2:
  1096. mmc->version = SD_VERSION_2;
  1097. if ((mmc->scr[0] >> 15) & 0x1)
  1098. mmc->version = SD_VERSION_3;
  1099. break;
  1100. default:
  1101. mmc->version = SD_VERSION_1_0;
  1102. break;
  1103. }
  1104. if (mmc->scr[0] & SD_DATA_4BIT)
  1105. mmc->card_caps |= MMC_MODE_4BIT;
  1106. /* Version 1.0 doesn't support switching */
  1107. if (mmc->version == SD_VERSION_1_0)
  1108. return 0;
  1109. timeout = 4;
  1110. while (timeout--) {
  1111. err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
  1112. (u8 *)switch_status);
  1113. if (err)
  1114. return err;
  1115. /* The high-speed function is busy. Try again */
  1116. if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
  1117. break;
  1118. }
  1119. /* Advertise the high-speed capability if the card supports it */
  1120. if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
  1121. mmc->card_caps |= MMC_CAP(SD_HS);
  1122. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1123. /* Versions before 3.0 don't support UHS modes */
  1124. if (mmc->version < SD_VERSION_3)
  1125. return 0;
  1126. sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
  1127. if (sd3_bus_mode & SD_MODE_UHS_SDR104)
  1128. mmc->card_caps |= MMC_CAP(UHS_SDR104);
  1129. if (sd3_bus_mode & SD_MODE_UHS_SDR50)
  1130. mmc->card_caps |= MMC_CAP(UHS_SDR50);
  1131. if (sd3_bus_mode & SD_MODE_UHS_SDR25)
  1132. mmc->card_caps |= MMC_CAP(UHS_SDR25);
  1133. if (sd3_bus_mode & SD_MODE_UHS_SDR12)
  1134. mmc->card_caps |= MMC_CAP(UHS_SDR12);
  1135. if (sd3_bus_mode & SD_MODE_UHS_DDR50)
  1136. mmc->card_caps |= MMC_CAP(UHS_DDR50);
  1137. #endif
  1138. return 0;
  1139. }
  1140. static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
  1141. {
  1142. int err;
  1143. ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
  1144. int speed;
  1145. /* SD versions 1.00 and 1.01 do not support CMD6 */
  1146. if (mmc->version == SD_VERSION_1_0)
  1147. return 0;
  1148. switch (mode) {
  1149. case MMC_LEGACY:
  1150. speed = UHS_SDR12_BUS_SPEED;
  1151. break;
  1152. case SD_HS:
  1153. speed = HIGH_SPEED_BUS_SPEED;
  1154. break;
  1155. #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
  1156. case UHS_SDR12:
  1157. speed = UHS_SDR12_BUS_SPEED;
  1158. break;
  1159. case UHS_SDR25:
  1160. speed = UHS_SDR25_BUS_SPEED;
  1161. break;
  1162. case UHS_SDR50:
  1163. speed = UHS_SDR50_BUS_SPEED;
  1164. break;
  1165. case UHS_DDR50:
  1166. speed = UHS_DDR50_BUS_SPEED;
  1167. break;
  1168. case UHS_SDR104:
  1169. speed = UHS_SDR104_BUS_SPEED;
  1170. break;
  1171. #endif
  1172. default:
  1173. return -EINVAL;
  1174. }
  1175. err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
  1176. if (err)
  1177. return err;
  1178. if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
  1179. return -ENOTSUPP;
  1180. return 0;
  1181. }
  1182. static int sd_select_bus_width(struct mmc *mmc, int w)
  1183. {
  1184. int err;
  1185. struct mmc_cmd cmd;
  1186. if ((w != 4) && (w != 1))
  1187. return -EINVAL;
  1188. cmd.cmdidx = MMC_CMD_APP_CMD;
  1189. cmd.resp_type = MMC_RSP_R1;
  1190. cmd.cmdarg = mmc->rca << 16;
  1191. err = mmc_send_cmd(mmc, &cmd, NULL);
  1192. if (err)
  1193. return err;
  1194. cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
  1195. cmd.resp_type = MMC_RSP_R1;
  1196. if (w == 4)
  1197. cmd.cmdarg = 2;
  1198. else if (w == 1)
  1199. cmd.cmdarg = 0;
  1200. err = mmc_send_cmd(mmc, &cmd, NULL);
  1201. if (err)
  1202. return err;
  1203. return 0;
  1204. }
  1205. #endif
  1206. #if CONFIG_IS_ENABLED(MMC_WRITE)
  1207. static int sd_read_ssr(struct mmc *mmc)
  1208. {
  1209. static const unsigned int sd_au_size[] = {
  1210. 0, SZ_16K / 512, SZ_32K / 512,
  1211. SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
  1212. SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
  1213. SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
  1214. SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
  1215. SZ_64M / 512,
  1216. };
  1217. int err, i;
  1218. struct mmc_cmd cmd;
  1219. ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
  1220. struct mmc_data data;
  1221. int timeout = 3;
  1222. unsigned int au, eo, et, es;
  1223. cmd.cmdidx = MMC_CMD_APP_CMD;
  1224. cmd.resp_type = MMC_RSP_R1;
  1225. cmd.cmdarg = mmc->rca << 16;
  1226. err = mmc_send_cmd(mmc, &cmd, NULL);
  1227. #ifdef CONFIG_MMC_QUIRKS
  1228. if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
  1229. int retries = 4;
  1230. /*
  1231. * It has been seen that APP_CMD may fail on the first
  1232. * attempt, let's try a few more times
  1233. */
  1234. do {
  1235. err = mmc_send_cmd(mmc, &cmd, NULL);
  1236. if (!err)
  1237. break;
  1238. } while (retries--);
  1239. }
  1240. #endif
  1241. if (err)
  1242. return err;
  1243. cmd.cmdidx = SD_CMD_APP_SD_STATUS;
  1244. cmd.resp_type = MMC_RSP_R1;
  1245. cmd.cmdarg = 0;
  1246. retry_ssr:
  1247. data.dest = (char *)ssr;
  1248. data.blocksize = 64;
  1249. data.blocks = 1;
  1250. data.flags = MMC_DATA_READ;
  1251. err = mmc_send_cmd(mmc, &cmd, &data);
  1252. if (err) {
  1253. if (timeout--)
  1254. goto retry_ssr;
  1255. return err;
  1256. }
  1257. for (i = 0; i < 16; i++)
  1258. ssr[i] = be32_to_cpu(ssr[i]);
  1259. au = (ssr[2] >> 12) & 0xF;
  1260. if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
  1261. mmc->ssr.au = sd_au_size[au];
  1262. es = (ssr[3] >> 24) & 0xFF;
  1263. es |= (ssr[2] & 0xFF) << 8;
  1264. et = (ssr[3] >> 18) & 0x3F;
  1265. if (es && et) {
  1266. eo = (ssr[3] >> 16) & 0x3;
  1267. mmc->ssr.erase_timeout = (et * 1000) / es;
  1268. mmc->ssr.erase_offset = eo * 1000;
  1269. }
  1270. } else {
  1271. pr_debug("Invalid Allocation Unit Size.\n");
  1272. }
  1273. return 0;
  1274. }
  1275. #endif
  1276. /* frequency bases */
  1277. /* divided by 10 to be nice to platforms without floating point */
  1278. static const int fbase[] = {
  1279. 10000,
  1280. 100000,
  1281. 1000000,
  1282. 10000000,
  1283. };
  1284. /* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
  1285. * to platforms without floating point.
  1286. */
  1287. static const u8 multipliers[] = {
  1288. 0, /* reserved */
  1289. 10,
  1290. 12,
  1291. 13,
  1292. 15,
  1293. 20,
  1294. 25,
  1295. 30,
  1296. 35,
  1297. 40,
  1298. 45,
  1299. 50,
  1300. 55,
  1301. 60,
  1302. 70,
  1303. 80,
  1304. };
  1305. static inline int bus_width(uint cap)
  1306. {
  1307. if (cap == MMC_MODE_8BIT)
  1308. return 8;
  1309. if (cap == MMC_MODE_4BIT)
  1310. return 4;
  1311. if (cap == MMC_MODE_1BIT)
  1312. return 1;
  1313. pr_warn("invalid bus width capability 0x%x\n", cap);
  1314. return 0;
  1315. }
  1316. #if !CONFIG_IS_ENABLED(DM_MMC)
  1317. #ifdef MMC_SUPPORTS_TUNING
  1318. static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
  1319. {
  1320. return -ENOTSUPP;
  1321. }
  1322. #endif
  1323. static int mmc_set_ios(struct mmc *mmc)
  1324. {
  1325. int ret = 0;
  1326. if (mmc->cfg->ops->set_ios)
  1327. ret = mmc->cfg->ops->set_ios(mmc);
  1328. return ret;
  1329. }
  1330. static int mmc_host_power_cycle(struct mmc *mmc)
  1331. {
  1332. int ret = 0;
  1333. if (mmc->cfg->ops->host_power_cycle)
  1334. ret = mmc->cfg->ops->host_power_cycle(mmc);
  1335. return ret;
  1336. }
  1337. #endif
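  /* Clamp the requested clock to the host's f_min/f_max limits (unless disabling the clock) and apply it via mmc_set_ios() */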
  1338. int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
  1339. {
  1340. if (!disable) {
  1341. if (clock > mmc->cfg->f_max)
  1342. clock = mmc->cfg->f_max;
  1343. if (clock < mmc->cfg->f_min)
  1344. clock = mmc->cfg->f_min;
  1345. }
  1346. mmc->clock = clock;
  1347. mmc->clk_disable = disable;
  1348. debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
  1349. return mmc_set_ios(mmc);
  1350. }
  1351. static int mmc_set_bus_width(struct mmc *mmc, uint width)
  1352. {
  1353. mmc->bus_width = width;
  1354. return mmc_set_ios(mmc);
  1355. }
  1356. #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
  1357. /*
  1358. * helper function to display the capabilities in a human
  1359. * friendly manner. The capabilities include bus width and
  1360. * supported modes.
  1361. */
  1362. void mmc_dump_capabilities(const char *text, uint caps)
  1363. {
  1364. enum bus_mode mode;
  1365. pr_debug("%s: widths [", text);
  1366. if (caps & MMC_MODE_8BIT)
  1367. pr_debug("8, ");
  1368. if (caps & MMC_MODE_4BIT)
  1369. pr_debug("4, ");
  1370. if (caps & MMC_MODE_1BIT)
  1371. pr_debug("1, ");
  1372. pr_debug("\b\b] modes [");
  1373. for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
  1374. if (MMC_CAP(mode) & caps)
  1375. pr_debug("%s, ", mmc_mode_name(mode));
  1376. pr_debug("\b\b]\n");
  1377. }
  1378. #endif
  1379. struct mode_width_tuning {
  1380. enum bus_mode mode;
  1381. uint widths;
  1382. #ifdef MMC_SUPPORTS_TUNING
  1383. uint tuning;
  1384. #endif
  1385. };
  1386. #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
  1387. int mmc_voltage_to_mv(enum mmc_voltage voltage)
  1388. {
  1389. switch (voltage) {
  1390. case MMC_SIGNAL_VOLTAGE_000: return 0;
  1391. case MMC_SIGNAL_VOLTAGE_330: return 3300;
  1392. case MMC_SIGNAL_VOLTAGE_180: return 1800;
  1393. case MMC_SIGNAL_VOLTAGE_120: return 1200;
  1394. }
  1395. return -EINVAL;
  1396. }
  1397. static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
  1398. {
  1399. int err;
  1400. if (mmc->signal_voltage == signal_voltage)
  1401. return 0;
  1402. mmc->signal_voltage = signal_voltage;
  1403. err = mmc_set_ios(mmc);
  1404. if (err)
  1405. pr_debug("unable to set voltage (err %d)\n", err);
  1406. return err;
  1407. }
  1408. #else
  1409. static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
  1410. {
  1411. return 0;
  1412. }
  1413. #endif
#if !CONFIG_IS_ENABLED(MMC_TINY)
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
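/*
 * The iterator above walks sd_modes_by_pref in descending order of
 * preference and only enters its body for modes present in @caps.
 * Illustrative use (mirroring sd_select_mode_and_width() below):
 *
 *	const struct mode_width_tuning *mwt;
 *
 *	for_each_sd_mode_by_pref(caps, mwt)
 *		pr_debug("candidate mode %s\n", mmc_mode_name(mwt->mode));
 */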
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#if CONFIG_IS_ENABLED(MMC_WRITE)
		err = sd_read_ssr(mmc);
		if (err)
			pr_warn("unable to read ssr\n");
#endif
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, MMC_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
/*
 * Read and compare the part of the ext_csd that is constant. This can be
 * used to check that the transfer is working as expected.
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	if (mmc->version < MMC_VERSION_4)
		return 0;

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read-only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	return -EBADMSG;
}
#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	switch (mode) {
	case MMC_HS_400_ES:
	case MMC_HS_400:
	case MMC_HS_200:
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
#else
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
#endif
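/*
 * Note on the selection order above: ffs() returns the least significant set
 * bit, so the first voltage tried is the one with the lowest bit value in
 * enum mmc_voltage. Assuming the usual encoding (1.2 V, 1.8 V and 3.3 V as
 * ascending bit values), the loop therefore attempts the lowest signalling
 * voltage supported by both card and host before falling back to higher ones.
 */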
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	{
		.mode = MMC_HS_400_ES,
		.widths = MMC_MODE_8BIT,
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))

static const struct ext_csd_bus_width {
	uint cap;
	bool is_ddr;
	uint ext_csd_bits;
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
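/*
 * The table above lists wider buses first and DDR entries before SDR ones,
 * so that for_each_supported_width() (defined below) tries the widest
 * matching bus first for a given DDR/SDR flavour.
 */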
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning if needed */
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS */
	mmc_set_card_speed(mmc, MMC_HS, true);

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
#else
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
#if !CONFIG_IS_ENABLED(DM_MMC)
static int mmc_set_enhanced_strobe(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	err = mmc_set_card_speed(mmc, MMC_HS, true);
	if (err)
		return err;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}
	/* TODO: driver strength */
	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400_ES);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return mmc_set_enhanced_strobe(mmc);
}
#else
static int mmc_select_hs400es(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif

#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	     ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
	 * before doing anything else, since a transition from either of
	 * the HS200/HS400 modes directly to legacy mode is not supported.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;

			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);

			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else if (mwt->mode == MMC_HS_400_ES) {
				err = mmc_select_hs400es(mmc);
				if (err) {
					printf("Select HS400ES failed %d\n",
					       err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode\n");

	return -ENOTSUPP;
}
#endif

#if CONFIG_IS_ENABLED(MMC_TINY)
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
#endif
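/*
 * Read the EXT_CSD register (MMC v4.0 and later only) and cache the fields
 * the core needs: spec revision, user/boot/RPMB/GP partition sizes, enhanced
 * area geometry, erase and write-protect group sizes and switch timeouts.
 */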
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
			| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
			| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
			| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	if (mmc->version >= MMC_VERSION_4_5)
		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];

	/* The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
	/* Some eMMC devices set the value too low, so enforce a minimum */
	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;

	/* store the partition info of the eMMC */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				(ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				(ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				(ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd(mmc, &cmd, NULL);

#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
		int retries = 4;
		/*
		 * It has been seen that SEND_CID may fail on the first
		 * attempt, let's try a few more times.
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif

	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State.
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}
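/*
 * SEND_IF_COND (CMD8) probe: the card is asked to echo the 0xaa check
 * pattern together with the host's supplied voltage range. Only SD version
 * 2.00 (and later) cards answer, so a valid echo is used below to flag
 * SD_VERSION_2.
 */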
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* We set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
/*
 * put the host in the initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to minimal values
 */
static void mmc_set_initial_state(struct mmc *mmc)
{
	int err;

	/* First try to set 3.3V. If it fails, fall back to 1.8V */
	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
	if (err != 0)
		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
	if (err != 0)
		pr_warn("mmc: failed to set signal voltage\n");

	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
}

static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			pr_debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;

	ret = mmc_host_power_cycle(mmc);
	if (ret)
		return ret;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * All hosts are capable of a 1-bit bus width and of using the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;

#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* we pretend there's no card when init is NULL */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}
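/*
 * mmc_init() is the normal entry point for bringing a card up: it runs
 * mmc_start_init() (card detect, power, operating conditions) and then
 * mmc_complete_init() (CID/CSD, mode and bus-width selection), unless an
 * earlier preinit already started the sequence. A typical caller might look
 * like the sketch below (device number 0 is only an example):
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		printf("capacity: %llu bytes\n", mmc->capacity);
 */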
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
int mmc_deinit(struct mmc *mmc)
{
	u32 caps_filtered;

	if (!mmc->has_init)
		return 0;

	if (IS_SD(mmc)) {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
			  MMC_CAP(UHS_SDR104));

		return sd_select_mode_and_width(mmc, caps_filtered);
	} else {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));

		return mmc_select_mode_and_width(mmc, caps_filtered);
	}
}
#endif
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(struct bd_info *bis)
{
	return -1;
}

void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC)
static int mmc_probe(struct bd_info *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
int mmc_initialize(struct bd_info *bis)
{
	static int initialized = 0;
	int ret;

	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#if CONFIG_IS_ENABLED(DM_MMC)
int mmc_init_device(int num)
{
	struct udevice *dev;
	struct mmc *m;
	int ret;

	ret = uclass_get_device(UCLASS_MMC, num, &dev);
	if (ret)
		return ret;

	m = mmc_get_mmc_dev(dev);
	if (!m)
		return 0;
	if (m->preinit)
		mmc_start_init(m);

	return 0;
}
#endif

#ifdef CONFIG_CMD_BKOPS_ENABLE
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif