cadence_qspi_apb.c

/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of the Altera Corporation nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <comdef.h>
#include <spi.h>
#include <sys.h>
#include <cadence_qspi.h>

#define CQSPI_REG_POLL_US	(1)	/* 1us */
#define CQSPI_REG_RETRY	(10000)
#define CQSPI_POLL_IDLE_RETRY	(3)

#define CQSPI_FIFO_WIDTH	(4)

#define CQSPI_REG_SRAM_THRESHOLD_WORDS	(50)

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE	(0)
#define CQSPI_INST_TYPE_DUAL	(1)
#define CQSPI_INST_TYPE_QUAD	(2)

#define CQSPI_STIG_DATA_LEN_MAX	(8)

//#define CQSPI_INDIRECTTRIGGER_ADDR_MASK	(0xFFFFFFFF)
#define CQSPI_INDIRECTTRIGGER_ADDR_MASK	(0x0)	//libo

#define CQSPI_DUMMY_CLKS_PER_BYTE	(8)
#define CQSPI_DUMMY_BYTES_MAX	(4)

#define CONFIG_SPI_FLASH_QUAD	(0)

#define CQSPI_REG_SRAM_FILL_THRESHOLD	\
	((CQSPI_REG_SRAM_SIZE_WORD / 2) * CQSPI_FIFO_WIDTH)
/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG	0x00
#define CQSPI_REG_CONFIG_CLK_POL_LSB	1
#define CQSPI_REG_CONFIG_CLK_PHA_LSB	2
#define CQSPI_REG_CONFIG_ENABLE_MASK	BIT(0)
#define CQSPI_REG_CONFIG_DIRECT_MASK	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK	BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM_MASK	BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB	10
#define CQSPI_REG_CONFIG_BAUD_LSB	19
#define CQSPI_REG_CONFIG_IDLE_LSB	31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK	0xF

#define CQSPI_REG_RD_INSTR	0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB	0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB	20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB	24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK	0x1F

#define CQSPI_REG_WR_INSTR	0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB	0
#define CQSPI_REG_WR_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_WR_INSTR_MODE_EN_LSB	20
#define CQSPI_REG_WR_INSTR_DUMMY_LSB	24
#define CQSPI_REG_WR_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_WR_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_WR_INSTR_DUMMY_MASK	0x1F

#define CQSPI_REG_DELAY	0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB	0
#define CQSPI_REG_DELAY_TCHSH_LSB	8
#define CQSPI_REG_DELAY_TSD2D_LSB	16
#define CQSPI_REG_DELAY_TSHSL_LSB	24
#define CQSPI_REG_DELAY_TSLCH_MASK	0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK	0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK	0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK	0xFF

#define CQSPI_READLCAPTURE	0x10
#define CQSPI_READLCAPTURE_BYPASS_LSB	0
#define CQSPI_READLCAPTURE_DELAY_LSB	1
#define CQSPI_READLCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE	0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB	0
#define CQSPI_REG_SIZE_PAGE_LSB	4
#define CQSPI_REG_SIZE_BLOCK_LSB	16
#define CQSPI_REG_SIZE_ADDRESS_MASK	0xF
#define CQSPI_REG_SIZE_PAGE_MASK	0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK	0x3F

#define CQSPI_REG_SRAMPARTITION	0x18
#define CQSPI_REG_INDIRECTTRIGGER	0x1C

#define CQSPI_REG_REMAP	0x24
#define CQSPI_REG_MODE_BIT	0x28

#define CQSPI_REG_SDRAMLEVEL	0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB	0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB	16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK	0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK	0xFFFF

#define CQSPI_REG_IRQSTATUS	0x40
#define CQSPI_REG_IRQMASK	0x44

#define CQSPI_REG_INDIRECTRD	0x60
#define CQSPI_REG_INDIRECTRD_START_MASK	BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS_MASK	BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE_MASK	BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK	0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR	0x68
#define CQSPI_REG_INDIRECTRDBYTES	0x6C
#define CQSPI_REG_INDIRECTTRI_ADDR_RANGE	0x80

#define CQSPI_REG_CMDCTRL	0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK	BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB	7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB	12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB	15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB	16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB	19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB	20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB	23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB	24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK	0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK	0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK	0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK	0xFF

#define CQSPI_REG_INDIRECTWR	0x70
#define CQSPI_REG_INDIRECTWR_START_MASK	BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS_MASK	BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE_MASK	BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK	0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR	0x78
#define CQSPI_REG_INDIRECTWRBYTES	0x7C

#define CQSPI_REG_CMDADDRESS	0x94
#define CQSPI_REG_CMDREADDATALOWER	0xA0
#define CQSPI_REG_CMDREADDATAUPPER	0xA4
#define CQSPI_REG_CMDWRITEDATALOWER	0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER	0xAC

#define CQSPI_REG_IS_IDLE(base)	\
	((readl(base + CQSPI_REG_CONFIG) >>	\
		CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_CAL_DELAY(tdelay_ns, tref_ns, tsclk_ns)	\
	((((tdelay_ns) - (tsclk_ns)) / (tref_ns)))

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)	\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>	\
	CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base)	\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>	\
	CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)
static unsigned int cadence_qspi_apb_cmd2addr(const unsigned char *addr_buf,
	unsigned int addr_width)
{
	unsigned int addr;

	addr = (addr_buf[0] << 16) | (addr_buf[1] << 8) | addr_buf[2];

	if (addr_width == 4)
		addr = (addr << 8) | addr_buf[3];

	return addr;
}
static void cadence_qspi_apb_read_fifo_data(void *dest,
	const void *src_ahb_addr, unsigned int bytes)
{
	unsigned int temp;
	int remaining = bytes;
	unsigned int *dest_ptr = (unsigned int *)dest;
	unsigned int *src_ptr = (unsigned int *)src_ahb_addr;

	while (remaining >= 4) {
		*dest_ptr = readl(src_ptr);
		remaining -= 4;
		dest_ptr++;
	}

	if (remaining) {
		/* dangling bytes */
		temp = readl(src_ptr);
		sys_memcpy(dest_ptr, &temp, remaining);
	}

	return;
}
static void cadence_qspi_apb_write_fifo_data(const void *dest_ahb_addr,
	const void *src, unsigned int bytes)
{
	unsigned int temp = 0;
	int i;
	int remaining = bytes;
	unsigned int *dest_ptr = (unsigned int *)dest_ahb_addr;
	unsigned int *src_ptr = (unsigned int *)src;

	//uart_printf("src_ptr = 0x%x, src = 0x%x\r\n", src_ptr, src);
	//uart_printf("*src_ptr = 0x%x\r\n", *src_ptr);
	while (remaining >= CQSPI_FIFO_WIDTH) {
		for (i = CQSPI_FIFO_WIDTH / 4 - 1; i >= 0; i--)
			writel(*(src_ptr + i), dest_ptr + i);
		//dest_ptr += CQSPI_FIFO_WIDTH/sizeof(src_ptr);
		src_ptr += CQSPI_FIFO_WIDTH / 4;
		remaining -= CQSPI_FIFO_WIDTH;
	}

	if (remaining) {
		/* dangling bytes */
		i = remaining / 4;
		sys_memcpy(&temp, src_ptr + i, remaining % 4);
		writel(temp, dest_ptr + i);
		for (--i; i >= 0; i--)
			writel(*(src_ptr + i), dest_ptr + i);
	}

	return;
}
/* Read from SRAM FIFO with polling SRAM fill level. */
static int qspi_read_sram_fifo_poll(const void *reg_base, void *dest_addr,
	const void *src_addr, unsigned int num_bytes)
{
	unsigned int remaining = num_bytes;
	unsigned int retry;
	unsigned int sram_level = 0;
	unsigned char *dest = (unsigned char *)dest_addr;

	while (remaining > 0) {
		retry = CQSPI_REG_RETRY;
		while (retry--) {
			sram_level = CQSPI_GET_RD_SRAM_LEVEL((u32)reg_base);
			if (sram_level)
				break;
			delay(100);
		}

		if (!retry) {
			//uart_printf("QSPI: No receive data after polling for %d times\n",
			//CQSPI_REG_RETRY);
			return -1;
		}

		sram_level *= CQSPI_FIFO_WIDTH;
		sram_level = sram_level > remaining ? remaining : sram_level;

		/* Read data from FIFO. */
		cadence_qspi_apb_read_fifo_data(dest, src_addr, sram_level);
		dest += sram_level;
		remaining -= sram_level;
		delay(100);
	}

	return 0;
}
/* Write to SRAM FIFO with polling SRAM fill level. */
static int qpsi_write_sram_fifo_push(struct cadence_spi_platdata *plat,
	const void *src_addr, unsigned int num_bytes)
{
	const void *reg_base = plat->regbase;
	void *dest_addr = plat->ahbbase;
	unsigned int retry = CQSPI_REG_RETRY;
	unsigned int sram_level;
	unsigned int wr_bytes;
	unsigned char *src = (unsigned char *)src_addr;
	int remaining = num_bytes;
	unsigned int page_size = plat->page_size;
	unsigned int sram_threshold_words = CQSPI_REG_SRAM_THRESHOLD_WORDS;

	while (remaining > 0) {
		retry = CQSPI_REG_RETRY;
		while (retry--) {
			sram_level = CQSPI_GET_WR_SRAM_LEVEL((u32)reg_base);
			if (sram_level <= sram_threshold_words)
				break;
		}

		if (!retry) {
			//uart_printf("QSPI: SRAM fill level (0x%08x) not hit lower expected level (0x%08x)",
			//sram_level, sram_threshold_words);
			return -1;
		}

		/* Write a page or remaining bytes. */
		wr_bytes = (remaining > page_size) ? page_size : remaining;

		cadence_qspi_apb_write_fifo_data(dest_addr, src, wr_bytes);
		src += wr_bytes;
		remaining -= wr_bytes;
	}

	return 0;
}
void cadence_qspi_apb_controller_enable(void *reg_base)
{
	unsigned int reg;

	reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);
	return;
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
	unsigned int reg;

	reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
	writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);
	return;
}
/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
	unsigned int start = 5000, count = 0;
	/* timeout in unit of ms */
	unsigned int timeout = 5000;

#if 0
	start = get_timer(0);
	for ( ; get_timer(start) < timeout ; ) {
		if (CQSPI_REG_IS_IDLE((u32)reg_base))
			count++;
		else
			count = 0;
		/*
		 * Ensure the QSPI controller is in true idle state after
		 * reading back the same idle status consecutively
		 */
		if (count >= CQSPI_POLL_IDLE_RETRY) {
			//uart_printf("count = %d, get_timer(start) = %d\n, ", count, get_timer(start));
			return 1;
		}
	}
#endif

	while (1) {
		if (CQSPI_REG_IS_IDLE((u32)reg_base))
			return 1;

		count++;
		//return 0;

		/*
		 * Give up after CQSPI_REG_RETRY polls; idle is reported
		 * anyway rather than failing the transfer.
		 */
		if (count >= CQSPI_REG_RETRY) {
			//uart_printf("count = %d\r\n, ", count);
			return 1;
		}
	}

	/* Timeout, still in busy mode. */
	//uart_printf("QSPI: QSPI is still busy after poll for %d times.\n",
	//CQSPI_REG_RETRY);
	return 0;
}
void cadence_qspi_apb_readdata_capture(void *reg_base,
	unsigned int bypass, unsigned int delay)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl((u32)reg_base + CQSPI_READLCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_READLCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_READLCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_READLCAPTURE_DELAY_MASK
		<< CQSPI_READLCAPTURE_DELAY_LSB);
	reg |= ((delay & CQSPI_READLCAPTURE_DELAY_MASK)
		<< CQSPI_READLCAPTURE_DELAY_LSB);

	writel(reg, (u32)reg_base + CQSPI_READLCAPTURE);
	//writel(0x21, (u32)reg_base + CQSPI_READLCAPTURE);

	cadence_qspi_apb_controller_enable(reg_base);
	return;
}
void cadence_qspi_apb_config_baudrate_div(void *reg_base,
	unsigned int ref_clk_hz, unsigned int sclk_hz)
{
	unsigned int reg;
	unsigned int div;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

	div = ref_clk_hz / sclk_hz;
	if (div > 32)
		div = 32;

	/* Odd ratio: round the divider up so SCLK never exceeds sclk_hz. */
	if ((div & 1)) {
		div = (div / 2);
	} else {
		if (ref_clk_hz % sclk_hz)
			/* ensure generated SCLK doesn't exceed user
			specified sclk_hz */
			div = (div / 2);
		else
			div = (div / 2) - 1;
	}

	//uart_printf("%s: ref_clk %dHz sclk %dHz Div 0x%x\n", __func__,
	//ref_clk_hz, sclk_hz, div);

	div = (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	reg |= div;
	writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
	return;
}
void cadence_qspi_apb_set_clk_mode(void *reg_base,
	unsigned int clk_pol, unsigned int clk_pha)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
	/* Clear both the polarity and phase bits before setting them. */
	reg &= ~((1 << CQSPI_REG_CONFIG_CLK_POL_LSB) |
		(1 << CQSPI_REG_CONFIG_CLK_PHA_LSB));

	reg |= ((clk_pol & 0x1) << CQSPI_REG_CONFIG_CLK_POL_LSB);
	reg |= ((clk_pha & 0x1) << CQSPI_REG_CONFIG_CLK_PHA_LSB);

	writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
	return;
}
void cadence_qspi_apb_chipselect(void *reg_base,
	unsigned int chip_select, unsigned int decoder_enable)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
	/* decoder */
	if (decoder_enable) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
	return;
}
void cadence_qspi_apb_delay(void *reg_base,
	unsigned int ref_clk, unsigned int sclk_hz,
	unsigned int tshsl_ns, unsigned int tsd2d_ns,
	unsigned int tchsh_ns, unsigned int tslch_ns)
{
	unsigned int ref_clk_ns;
	unsigned int sclk_ns;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	/* Convert to ns (1e9 ns per second). */
	ref_clk_ns = (1000000000) / ref_clk;
	/* Convert to ns. */
	sclk_ns = (1000000000) / sclk_hz;

#if 0
	/* Plus 1 to round up 1 clock cycle. */
	tshsl = CQSPI_CAL_DELAY(tshsl_ns, ref_clk_ns, sclk_ns) + 1;
	tchsh = CQSPI_CAL_DELAY(tchsh_ns, ref_clk_ns, sclk_ns) + 1;
	tslch = CQSPI_CAL_DELAY(tslch_ns, ref_clk_ns, sclk_ns) + 1;
	tsd2d = CQSPI_CAL_DELAY(tsd2d_ns, ref_clk_ns, sclk_ns) + 1;
#endif
	/* Fixed one-cycle delays are used instead of the calculated values. */
	tshsl = 1;
	tchsh = 1;
	tslch = 1;
	tsd2d = 1;

	reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
		<< CQSPI_REG_DELAY_TSHSL_LSB);
	reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB);
	reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB);
	reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB);
	writel(reg, (u32)reg_base + CQSPI_REG_DELAY);
	//writel(0x0, (u32)reg_base + CQSPI_REG_DELAY);

	cadence_qspi_apb_controller_enable(reg_base);
	return;
}
void cadence_qspi_apb_controller_init(struct cadence_spi_platdata *plat)
{
	unsigned reg;

	cadence_qspi_apb_controller_disable(plat->regbase);

	/* Configure the device size and address bytes */
	reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
	/* Clear the previous value */
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
	writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);

	/* Configure the remap address register, no remap */
	writel(0, (u32)plat->regbase + CQSPI_REG_REMAP);
	//writel(0x8, (u32)plat->regbase + CQSPI_REG_INDIRECTTRI_ADDR_RANGE);

	/* Indirect mode configurations */
	writel((plat->sram_size / 2), (u32)plat->regbase + CQSPI_REG_SRAMPARTITION);

	/* Disable all interrupts */
	writel(0, (u32)plat->regbase + CQSPI_REG_IRQMASK);

	cadence_qspi_apb_controller_enable(plat->regbase);
	return;
}
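
/*
 * Usage sketch (illustrative only, compiled out): one plausible bring-up
 * order for the helpers above. The 400 MHz reference clock, 50 MHz SCLK,
 * delay values and capture settings are assumptions for the example, not
 * values taken from this driver; real boards should use their own numbers.
 */
#if 0
static void cadence_qspi_example_bringup(struct cadence_spi_platdata *plat)
{
	cadence_qspi_apb_controller_init(plat);
	/* SPI mode 0, chip-select 0, external decoder disabled. */
	cadence_qspi_apb_set_clk_mode(plat->regbase, 0, 0);
	cadence_qspi_apb_chipselect(plat->regbase, 0, 0);
	/* Assumed clocks: 400 MHz reference divided down to <= 50 MHz SCLK. */
	cadence_qspi_apb_config_baudrate_div(plat->regbase, 400000000, 50000000);
	cadence_qspi_apb_delay(plat->regbase, 400000000, 50000000, 50, 50, 4, 4);
	/* Bypass the read capture logic with no extra delay cycles. */
	cadence_qspi_apb_readdata_capture(plat->regbase, 1, 0);
}
#endif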
static int cadence_qspi_apb_exec_flash_cmd(u32 reg_base,
	unsigned int reg)
{
	unsigned int retry = CQSPI_REG_RETRY;

	/* Write the CMDCTRL without start execution. */
	writel(reg, (u32)reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, (u32)reg_base + CQSPI_REG_CMDCTRL);

	while (retry--) {
		reg = readl((u32)reg_base + CQSPI_REG_CMDCTRL);
		if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS_MASK) == 0)
			break;
		delay(1000);
	}

	if (!retry) {
		//uart_printf("QSPI: flash command execution timeout\n");
		return -1;
	}

	/* Polling QSPI idle status. */
	if (!cadence_qspi_wait_idle((void *)reg_base))
		return -1;

	return 0;
}
/* For commands: RDID, RDSR. */
int cadence_qspi_apb_command_read(void *reg_base,
	unsigned int cmdlen, const u8 *cmdbuf, unsigned int rxlen,
	u8 *rxbuf)
{
	unsigned int reg;
	unsigned int read_len;
	int status;

	if (!cmdlen || rxlen > CQSPI_STIG_DATA_LEN_MAX || rxbuf == NULL) {
		//uart_printf("QSPI: Invalid input arguments cmdlen %d rxlen %d\n",
		//cmdlen, rxlen);
		return -1;
	}

	reg = cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd((u32)reg_base, reg);
	if (status != 0)
		return status;

	reg = readl((u32)reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	sys_memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl((u32)reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		sys_memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}
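
/*
 * Usage sketch (illustrative only, compiled out): reading the 3-byte JEDEC
 * ID through the STIG read path above. The 0x9F opcode is the common JEDEC
 * RDID command and is assumed here; adjust for the actual flash device.
 */
#if 0
static int cadence_qspi_example_read_id(void *reg_base, u8 *id /* 3 bytes */)
{
	u8 cmd = 0x9F;	/* JEDEC RDID */

	return cadence_qspi_apb_command_read(reg_base, 1, &cmd, 3, id);
}
#endif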
/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(void *reg_base, unsigned int cmdlen,
	const u8 *cmdbuf, unsigned int txlen, const u8 *txbuf)
{
	unsigned int reg = 0;
	unsigned int addr_value;
	unsigned int wr_data;
	unsigned int wr_len;

	if (!cmdlen || cmdlen > 5 || txlen > 8 || cmdbuf == NULL) {
		//uart_printf("QSPI: Invalid input arguments cmdlen %d txlen %d\n",
		//cmdlen, txlen);
		return -1;
	}

	reg |= cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (cmdlen == 4 || cmdlen == 5) {
		/* Command with address */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		/* Number of address bytes (0 means 1 byte). */
		reg |= ((cmdlen - 2) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
		/* Get address */
		addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1],
			cmdlen >= 5 ? 4 : 3);

		writel(addr_value, (u32)reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		wr_len = txlen > 4 ? 4 : txlen;
		sys_memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, (u32)reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			sys_memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, (u32)reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd((u32)reg_base, reg);
}
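
/*
 * Usage sketch (illustrative only, compiled out): write-enable followed by a
 * 3-byte-address block erase through the STIG write path above. The 0x06
 * (WREN) and 0xD8 (block erase) opcodes are typical NOR commands assumed for
 * the example; check the datasheet of the actual part.
 */
#if 0
static int cadence_qspi_example_erase(void *reg_base, unsigned int addr)
{
	u8 wren = 0x06;
	u8 erase[4];

	if (cadence_qspi_apb_command_write(reg_base, 1, &wren, 0, NULL))
		return -1;

	erase[0] = 0xD8;
	erase[1] = (addr >> 16) & 0xFF;
	erase[2] = (addr >> 8) & 0xFF;
	erase[3] = addr & 0xFF;
	/* cmdlen 4 = opcode + 3 address bytes; no data payload. */
	return cadence_qspi_apb_command_write(reg_base, 4, erase, 0, NULL);
}
#endif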
/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_indirect_read_setup(struct cadence_spi_platdata *plat,
	unsigned int cmdlen, const u8 *cmdbuf)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int addr_value;
	unsigned int dummy_clk;
	unsigned int dummy_bytes;
	unsigned int addr_bytes;

	/*
	 * Identify addr_bytes. All NOR flash drivers use fast read, which
	 * always expects 1 cmd byte, 3/4 address bytes and 1 dummy byte,
	 * so cmdlen is 5 or 6. Only Ramtron FRAM chips use normal read
	 * (no dummy byte); NOR flash is unlikely to use normal read because
	 * of the performance penalty.
	 */
	if (cmdlen >= 5)
		/* to cater for fast read where cmd + addr + dummy */
		addr_bytes = cmdlen - 2;
	else
		/* for normal read (only Ramtron as of now) */
		addr_bytes = cmdlen - 1;

	/* Setup the indirect trigger address */
	writel(((u32)plat->ahbbase & CQSPI_INDIRECTTRIGGER_ADDR_MASK),
		(u32)plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	rd_reg = cmdbuf[0] << CQSPI_REG_RD_INSTR_OPCODE_LSB;

	if (plat->bit_mode == 4) {
		/* Instruction and address at DQ0, data at DQ0-3. */
		rd_reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
	} else {
		rd_reg &= ~(CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB);
	}

	/* Get address */
	addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
	writel(addr_value, (u32)plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	/* The remaining length is dummy bytes. */
	dummy_bytes = cmdlen - addr_bytes - 1;
	if (dummy_bytes) {
		if (dummy_bytes > CQSPI_DUMMY_BYTES_MAX)
			dummy_bytes = CQSPI_DUMMY_BYTES_MAX;

		rd_reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
#if defined(CONFIG_SPL_SPI_XIP) && defined(CONFIG_SPL_BUILD)
		writel(0x0, plat->regbase + CQSPI_REG_MODE_BIT);
#else
		writel(0xFF, (u32)plat->regbase + CQSPI_REG_MODE_BIT);
#endif

		/* Convert to clock cycles. */
		dummy_clk = dummy_bytes * CQSPI_DUMMY_CLKS_PER_BYTE;
		/* Need to subtract the mode byte (8 clocks). */
		dummy_clk -= CQSPI_DUMMY_CLKS_PER_BYTE;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, (u32)plat->regbase + CQSPI_REG_RD_INSTR);
	//writel(0x0012006b, (u32)plat->regbase + CQSPI_REG_RD_INSTR);
	//writel(0x041220eb, (u32)plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (addr_bytes - 1);
	writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);
	return 0;
}
int cadence_qspi_apb_indirect_read_execute(struct cadence_spi_platdata *plat,
	unsigned int rxlen, u8 *rxbuf)
{
	unsigned int reg;

	writel(rxlen, (u32)plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

	/* Start the indirect read transfer */
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTRD);

	if (qspi_read_sram_fifo_poll(plat->regbase, (void *)rxbuf,
		(const void *)plat->ahbbase, rxlen))
		goto failrd;

	/* Check flash indirect controller status */
	reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTRD);
	if (!(reg & CQSPI_REG_INDIRECTRD_DONE_MASK)) {
		reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTRD);
		//uart_printf("QSPI read_execute: indirect completion status error with reg 0x%x\n",
		//reg);
		goto failrd;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTRD);
	return 0;

failrd:
	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTRD);
	return -1;
}
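
/*
 * Usage sketch (illustrative only, compiled out): a complete indirect read
 * using the setup/execute pair above. cmdbuf follows the layout the setup
 * routine expects: opcode, 3 address bytes, 1 dummy byte (cmdlen 5). The
 * 0x0B fast-read opcode is an assumption for the example.
 */
#if 0
static int cadence_qspi_example_read(struct cadence_spi_platdata *plat,
	unsigned int addr, u8 *buf, unsigned int len)
{
	u8 cmd[5];

	cmd[0] = 0x0B;			/* fast read */
	cmd[1] = (addr >> 16) & 0xFF;
	cmd[2] = (addr >> 8) & 0xFF;
	cmd[3] = addr & 0xFF;
	cmd[4] = 0;			/* dummy byte */

	if (cadence_qspi_apb_indirect_read_setup(plat, 5, cmd))
		return -1;
	return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
}
#endif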
/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_indirect_write_setup(struct cadence_spi_platdata *plat,
	unsigned int cmdlen, const u8 *cmdbuf)
{
	unsigned int reg;
	unsigned int addr_bytes = cmdlen > 4 ? 4 : 3;

	if (cmdlen < 4 || cmdbuf == NULL) {
		//uart_printf("QSPI: Invalid input argument, len %d cmdbuf 0x%x\n",
		//cmdlen, (unsigned int)cmdbuf);
		return -1;
	}

	/* Setup the indirect trigger address */
	writel(((u32)plat->ahbbase & CQSPI_INDIRECTTRIGGER_ADDR_MASK),
		(u32)plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	reg = cmdbuf[0] << CQSPI_REG_WR_INSTR_OPCODE_LSB;

	if (plat->bit_mode == 4) {
		/* Instruction and address at DQ0, data at DQ0-3. */
		reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	} else {
		reg &= ~(CQSPI_INST_TYPE_QUAD << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB);
	}

	writel(reg, (u32)plat->regbase + CQSPI_REG_WR_INSTR);
	//writel(0x00020032, (u32)plat->regbase + CQSPI_REG_WR_INSTR);

	/* Setup write address. */
	reg = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
	writel(reg, (u32)plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (addr_bytes - 1);
	writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);
	return 0;
}
int cadence_qspi_apb_indirect_write_execute(struct cadence_spi_platdata *plat,
	unsigned int txlen, const u8 *txbuf)
{
	unsigned int reg = 0;
	unsigned int retry;

	/* Configure the indirect write transfer bytes */
	writel(txlen, (u32)plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTWR);

	if (qpsi_write_sram_fifo_push(plat, (const void *)txbuf, txlen))
		goto failwr;

#if 1
	/* Wait until last write is completed (FIFO empty) */
	retry = CQSPI_REG_RETRY;
	while (retry--) {
		reg = CQSPI_GET_WR_SRAM_LEVEL((u32)plat->regbase);
		if (reg == 0)
			break;
		delay(1000);
	}

	if (reg != 0) {
		//uart_printf("QSPI: timeout for indirect write\n");
		goto failwr;
	}

	/* Check flash indirect controller status */
	retry = CQSPI_REG_RETRY;
	while (retry--) {
		reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTWR);
		if (reg & CQSPI_REG_INDIRECTWR_DONE_MASK)
			break;
		delay(1000);
	}

	if (!(reg & CQSPI_REG_INDIRECTWR_DONE_MASK)) {
		//uart_printf("QSPI write_execute: indirect completion status error with reg 0x%x\n",
		//reg);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTWR);
#endif
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
		(u32)plat->regbase + CQSPI_REG_INDIRECTWR);
	return -1;
}
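
/*
 * Usage sketch (illustrative only, compiled out): a page-program flow built
 * from the routines above: write-enable via the STIG path, then indirect
 * write setup (opcode + 3 address bytes) and execute. The 0x06 and 0x02
 * opcodes are typical NOR commands assumed for the example; real callers
 * must also keep the data within one page and poll the status register
 * until the program operation completes.
 */
#if 0
static int cadence_qspi_example_write(struct cadence_spi_platdata *plat,
	unsigned int addr, const u8 *buf, unsigned int len)
{
	u8 wren = 0x06;
	u8 cmd[4];

	if (cadence_qspi_apb_command_write(plat->regbase, 1, &wren, 0, NULL))
		return -1;

	cmd[0] = 0x02;			/* page program */
	cmd[1] = (addr >> 16) & 0xFF;
	cmd[2] = (addr >> 8) & 0xFF;
	cmd[3] = addr & 0xFF;

	if (cadence_qspi_apb_indirect_write_setup(plat, 4, cmd))
		return -1;
	return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
}
#endif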