ufs.c

// SPDX-License-Identifier: GPL-2.0+
/**
 * ufs.c - Universal Flash Storage (UFS) driver
 *
 * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported
 * to U-Boot.
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */
#include <charset.h>
#include <common.h>
#include <dm.h>
#include <dm/lists.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <hexdump.h>
#include <scsi.h>
#include <asm/dma-mapping.h>

#include "ufs.h"
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* Query request retries */
#define QUERY_REQ_RETRIES 3

/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* maximum timeout in ms for a general UIC command */
#define UFS_UIC_CMD_TIMEOUT 1000

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10

/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 30 /* msecs */

/* Only use one Task Tag for all requests */
#define TASK_TAG 0

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

#define MAX_PRDT_ENTRY 262144

/* maximum bytes per request */
#define UFS_MAX_BYTES (128 * 256 * 1024)
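
/*
 * Sizing note (illustrative arithmetic, not part of the original source):
 * with MAX_PRDT_ENTRY = 262144 bytes (256 KiB) per scatter-gather entry,
 * a maximal request of UFS_MAX_BYTES needs
 * (128 * 256 * 1024) / 262144 = 128 PRDT entries; this is the worst case
 * prepare_prdt_table() below has to cover.
 */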
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba);
static inline void ufshcd_hba_stop(struct ufs_hba *hba);
static int ufshcd_hba_enable(struct ufs_hba *hba);

/*
 * ufshcd_wait_for_register - wait for register value to change
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = 1;
	hba->pwr_info.lane_tx = 1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 * to accept UIC commands
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 * the host controller
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
		DEVICE_PRESENT) ? true : false;
}
/**
 * ufshcd_send_uic_cmd - UFS Interconnect layer command API
 */
static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long start = 0;
	u32 intr_status;
	u32 enabled_intr_status;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	debug("sending uic command:%d\n", uic_cmd->command);

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UIC response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UFSHCD_UIC_MASK));

	uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba);
	uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba);

	debug("Sent successfully\n");

	return 0;
}
/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set,
			u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
			UFS_UIC_COMMAND_RETRIES - retries);

	return ret;
}
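
/*
 * Illustrative usage (hedged): callers normally reach this function through
 * the ufshcd_dme_set()/ufshcd_dme_peer_set() wrappers rather than calling it
 * directly, e.g. as ufshcd_change_power_mode() below does:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
 */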
/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	return ret;
}
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	u32 tx_lanes, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
					     UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					     UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					     0);
		else
			err = ufshcd_dme_peer_set(hba,
					UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, the host controller
 * can start processing requests.
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_enable_intr - enable interrupts
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	hba->intr_mask = set;
}
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Disable interrupt aggregation */
	ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}
/**
 * ufshcd_link_startup - Initialize unipro link startup
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}
/**
 * ufshcd_hba_stop - Send controller to reset state
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}

/**
 * ufshcd_is_hba_active - Get controller state
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_hba_enable - initialize the controller
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller, the HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes the initialization sequence it
	 * sets HCE back to 1, and the same bit is read back to check whether
	 * initialization has completed. Without this delay, the HCE = 1 value
	 * written by the previous instruction might be read back immediately.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 * memory offsets
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	/* Allocate one Transfer Request Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc));
	if (!hba->utrdl) {
		dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	/* Allocate one Command Descriptor
	 * Should be aligned to 1k boundary.
	 */
	hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc));
	if (!hba->ucdl) {
		dev_err(hba->dev, "Command descriptor memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 */
static void ufshcd_prepare_req_desc_hdr(struct utp_transfer_req_desc *req_desc,
					u32 *upiu_flags,
					enum dma_data_direction cmd_dir)
{
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET);

	/* Enable Interrupt for command */
	dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);
	/* dword_1 is reserved, hence it is set to 0 */
	req_desc->header.dword_1 = 0;
	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
	/* dword_3 is reserved, hence it is set to 0 */
	req_desc->header.dword_3 = 0;

	req_desc->prd_table_length = 0;
}
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(0, query->request.query_func,
				  0, 0);

	/* Data segment length is only needed for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, 0x1f);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
 * for Device Management Purposes
 */
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba,
				   enum dev_cmd_type cmd_type)
{
	u32 upiu_flags;
	int ret = 0;
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	hba->dev_cmd.type = cmd_type;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, DMA_NONE);
	switch (cmd_type) {
	case DEV_CMD_TYPE_QUERY:
		ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags);
		break;
	case DEV_CMD_TYPE_NOP:
		ufshcd_prepare_utp_nop_upiu(hba);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	unsigned long start;
	u32 intr_status;
	u32 enabled_intr_status;

	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);

	start = get_timer(0);
	do {
		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
		enabled_intr_status = intr_status & hba->intr_mask;
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);

		if (get_timer(start) > QUERY_REQ_TIMEOUT) {
			dev_err(hba->dev,
				"Timed out waiting for UTP response\n");
			return -ETIMEDOUT;
		}

		if (enabled_intr_status & UFSHCD_ERROR_MASK) {
			dev_err(hba->dev, "Error in status:%08x\n",
				enabled_intr_status);
			return -1;
		}
	} while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL));

	return 0;
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	return le32_to_cpu(hba->utrdl->header.dword_2) & MASK_OCS;
}

static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
			      UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
			    GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
				     struct ufs_query_req **request,
				     struct ufs_query_res **response,
				     enum query_opcode opcode,
				     u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
				   enum query_opcode opcode,
				   enum flag_idn idn, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
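
/*
 * Illustrative usage (hedged): device-init code typically polls a flag such
 * as fDeviceInit until the device clears it, assuming the
 * QUERY_FLAG_IDN_FDEVICEINIT idn from ufs.h:
 *
 *	bool flag_res = true;
 *
 *	ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */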
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode,
				  enum desc_idn idn, u8 index, u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
				      &hba->desc_size.dev_desc);
	if (err)
		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
				      &hba->desc_size.pwr_desc);
	if (err)
		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
				      &hba->desc_size.interc_desc);
	if (err)
		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
				      &hba->desc_size.conf_desc);
	if (err)
		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
				      &hba->desc_size.unit_desc);
	if (err)
		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
				      &hba->desc_size.geom_desc);
	if (err)
		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;

	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
				      &hba->desc_size.hlth_desc);
	if (err)
		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
/**
 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
 */
int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_len)
{
	switch (desc_id) {
	case QUERY_DESC_IDN_DEVICE:
		*desc_len = hba->desc_size.dev_desc;
		break;
	case QUERY_DESC_IDN_POWER:
		*desc_len = hba->desc_size.pwr_desc;
		break;
	case QUERY_DESC_IDN_GEOMETRY:
		*desc_len = hba->desc_size.geom_desc;
		break;
	case QUERY_DESC_IDN_CONFIGURATION:
		*desc_len = hba->desc_size.conf_desc;
		break;
	case QUERY_DESC_IDN_UNIT:
		*desc_len = hba->desc_size.unit_desc;
		break;
	case QUERY_DESC_IDN_INTERCONNECT:
		*desc_len = hba->desc_size.interc_desc;
		break;
	case QUERY_DESC_IDN_STRING:
		*desc_len = QUERY_DESC_MAX_SIZE;
		break;
	case QUERY_DESC_IDN_HEALTH:
		*desc_len = hba->desc_size.hlth_desc;
		break;
	case QUERY_DESC_IDN_RFU_0:
	case QUERY_DESC_IDN_RFU_1:
		*desc_len = 0;
		break;
	default:
		*desc_len = 0;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 */
int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id,
			   int desc_index, u8 param_offset, u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Get the max length of descriptor from structure filled up at probe
	 * time.
	 */
	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);

	/* Sanity checks */
	if (ret || !buff_len) {
		dev_err(hba->dev, "%s: Failed to get full descriptor length",
			__func__);
		return ret;
	}

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, desc_buf,
					    &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	/* Make sure we don't copy more data than is available */
	if (is_kmalloc && param_size > buff_len)
		param_size = buff_len;

	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);

out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
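
/*
 * Illustrative usage (hedged): reading a single device-descriptor byte,
 * here the first byte of the manufacturer ID field that
 * ufs_get_device_desc() below reads via a full descriptor buffer:
 *
 *	u8 byte;
 *
 *	ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *			       DEVICE_DESC_PARAM_MANF_ID, &byte, 1);
 */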
/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
 * state) and waits for them to take effect.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		return ret;
	}

	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;

	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);

	return ret;
}
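
/*
 * Note (illustrative): the PA_PWRMODE byte packs the RX mode in bits 7:4 and
 * the TX mode in bits 3:0, which is why ufshcd_change_power_mode() below
 * invokes this as:
 *
 *	ufshcd_uic_change_pwr_mode(hba,
 *				   pwr_mode->pwr_rx << 4 | pwr_mode->pwr_tx);
 */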
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
				  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		req_desc->prd_table_length = 0;
		return;
	}

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
}
static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb)
{
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	u32 upiu_flags;
	int ocs, result = 0;
	u8 scsi_status;

	ufshcd_prepare_req_desc_hdr(req_desc, &upiu_flags, pccb->dma_dir);
	ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags);
	prepare_prdt_table(hba, pccb);

	ufshcd_send_command(hba, TASK_TAG);

	ocs = ufshcd_get_tr_ocs(hba);
	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr);

			scsi_status = result & MASK_SCSI_STATUS;
			if (scsi_status)
				return -EINVAL;

			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			return -EINVAL;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			return -EINVAL;
		}
		break;
	default:
		dev_err(hba->dev, "OCS error from controller = %x\n", ocs);
		return -EINVAL;
	}

	return 0;
}
static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, u8 *buf, u32 size)
{
	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
}
/**
 * ufshcd_read_string_desc - read string descriptor
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);
	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				    desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Return: 0 on success, -EINVAL if the connected lane count or the maximum
 * gear cannot be read.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	pwr_info->pwr_tx = FAST_MODE;
	pwr_info->pwr_rx = FAST_MODE;
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gear of HS speed. A value of zero means
	 * there is no HSGEAR capability; fall back to the maximum gear of
	 * PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	/* The peer's RX capability bounds this side's TX gear */
	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

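/**
 * ufshcd_change_power_mode - program the UniPro link for the requested mode
 * @hba: per-adapter instance
 * @pwr_mode: requested gears, lane counts, power modes and HS series
 *
 * Return: 0 on success, negative errno if the UIC power mode change fails.
 */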
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* Nothing to do if the link is already in the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure the attributes required for the power mode change:
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);
	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
		return ret;
	}

	/* Copy the new power mode to the cached power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Sends NOP OUT UPIUs, retrying until the device responds.
 *
 * Return: 0 on success, negative errno if the device never answered.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Sets the fDeviceInit flag and polls until the device clears it again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i;
	int err;
	bool flag_res = true;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 1000 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 1000 && !err && flag_res; i++)
		err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					      QUERY_FLAG_IDN_FDEVICEINIT,
					      &flag_res);

	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

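/* Reset all cached descriptor lengths to the defaults from the UFS spec */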
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
{
	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
	hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}

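/**
 * ufs_start - bring the link and the attached UFS device to a usable state
 * @hba: per-adapter instance
 *
 * Starts the link, verifies the device with NOP OUT, completes device
 * initialization via fDeviceInit, reads the device descriptor and switches
 * the link to the maximum power mode both ends support.
 *
 * Return: 0 on success, negative errno otherwise.
 */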
int ufs_start(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		return ret;
	}

	if (ufshcd_get_max_pwr_mode(hba)) {
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);
			return ret;
		}

		printf("Device at %s up at:", hba->dev->name);
		ufshcd_print_pwr_info(hba);
	}

	return 0;
}

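/**
 * ufshcd_probe - initialize a UFS host controller and start its device
 * @ufs_dev: UFS controller udevice
 * @hba_ops: platform-specific host controller callbacks
 *
 * Return: 0 on success, negative errno otherwise.
 */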
int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_platdata *scsi_plat;
	struct udevice *scsi_dev;
	int err;

	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_platdata(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;
	hba->mmio_base = (void *)dev_read_addr(ufs_dev);

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get the interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering the UFS controller interrupt handler, clear any pending
	 * UFS interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}

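/**
 * ufs_scsi_bind - bind a ufs_scsi child so the SCSI uclass can scan the LUNs
 * @ufs_dev: parent UFS controller udevice
 * @scsi_devp: returns the bound SCSI udevice
 *
 * Return: 0 on success, negative errno otherwise.
 */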
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	int ret = device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				     scsi_devp);

	return ret;
}

static struct scsi_ops ufs_ops = {
	.exec = ufs_scsi_exec,
};

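/*
 * Probe a single UFS controller by uclass index.
 *
 * A minimal usage sketch; the calling code below is illustrative and not
 * part of this file:
 *
 *	if (!ufs_probe_dev(0))
 *		scsi_scan(false);
 *
 * scsi_scan() then enumerates the LUNs behind the bound ufs_scsi device.
 */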
int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}

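/*
 * Probe every UFS controller in the system, stopping at the first index
 * that does not exist. Errors from individual controllers are skipped so
 * that one bad controller does not hide the rest.
 */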
int ufs_probe(void)
{
	struct udevice *dev;
	int ret, i;

	for (i = 0;; i++) {
		ret = uclass_get_device(UCLASS_UFS, i, &dev);
		if (ret == -ENODEV)
			break;
	}

	return 0;
}

U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};