cadence_qspi_apb.c

/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - Neither the name of the Altera Corporation nor the
 *   names of its contributors may be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <comdef.h>
#include <spi.h>
#include <sys.h>
#include <cadence_qspi.h>
#include <timer.h>

#define CQSPI_REG_POLL_US (1) /* 1us */
#define CQSPI_REG_RETRY (10000)
#define CQSPI_POLL_IDLE_RETRY (3)
#define CQSPI_FIFO_WIDTH (4)
#define CQSPI_REG_SRAM_THRESHOLD_WORDS (50)

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE (0)
#define CQSPI_INST_TYPE_DUAL (1)
#define CQSPI_INST_TYPE_QUAD (2)

#define CQSPI_STIG_DATA_LEN_MAX (8)

//#define CQSPI_INDIRECTTRIGGER_ADDR_MASK (0xFFFFFFFF)
#define CQSPI_INDIRECTTRIGGER_ADDR_MASK (0x0) //libo

#define CQSPI_DUMMY_CLKS_PER_BYTE (8)
#define CQSPI_DUMMY_BYTES_MAX (4)

#define CONFIG_SPI_FLASH_QUAD (0)

#define CQSPI_REG_SRAM_FILL_THRESHOLD \
    ((CQSPI_REG_SRAM_SIZE_WORD / 2) * CQSPI_FIFO_WIDTH)

/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_CLK_POL_LSB 1
#define CQSPI_REG_CONFIG_CLK_PHA_LSB 2
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
#define CQSPI_REG_CONFIG_DIRECT_MASK BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM_MASK BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF

#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F

#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
#define CQSPI_REG_WR_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_WR_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_WR_INSTR_DUMMY_LSB 24
#define CQSPI_REG_WR_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_WR_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_WR_INSTR_DUMMY_MASK 0x1F

#define CQSPI_REG_DELAY 0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB 0
#define CQSPI_REG_DELAY_TCHSH_LSB 8
#define CQSPI_REG_DELAY_TSD2D_LSB 16
#define CQSPI_REG_DELAY_TSHSL_LSB 24
#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF

#define CQSPI_READLCAPTURE 0x10
#define CQSPI_READLCAPTURE_BYPASS_LSB 0
#define CQSPI_READLCAPTURE_DELAY_LSB 1
#define CQSPI_READLCAPTURE_DELAY_MASK 0xF

#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
#define CQSPI_REG_SIZE_PAGE_LSB 4
#define CQSPI_REG_SIZE_BLOCK_LSB 16
#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F

#define CQSPI_REG_SRAMPARTITION 0x18
#define CQSPI_REG_INDIRECTTRIGGER 0x1C
#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28

#define CQSPI_REG_SDRAMLEVEL 0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF

#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44

#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS_MASK BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
#define CQSPI_REG_INDIRECTRDBYTES 0x6C
#define CQSPI_REG_INDIRECTTRI_ADDR_RANGE 0x80

#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK 0xFF

#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS_MASK BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C

#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC

#define CQSPI_REG_IS_IDLE(base) \
    ((readl(base + CQSPI_REG_CONFIG) >> \
        CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_CAL_DELAY(tdelay_ns, tref_ns, tsclk_ns) \
    ((((tdelay_ns) - (tsclk_ns)) / (tref_ns)))

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base) \
    (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
        CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base) \
    (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
        CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)
static unsigned int cadence_qspi_apb_cmd2addr(const unsigned char *addr_buf,
    unsigned int addr_width)
{
    unsigned int addr;

    addr = (addr_buf[0] << 16) | (addr_buf[1] << 8) | addr_buf[2];
    if (addr_width == 4)
        addr = (addr << 8) | addr_buf[3];

    return addr;
}
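
/*
 * Illustrative example for cadence_qspi_apb_cmd2addr() above (added comment,
 * not in the original source): a 3-byte address buffer {0x01, 0x02, 0x03}
 * packs to 0x010203; with addr_width == 4 and {0x01, 0x02, 0x03, 0x04} the
 * result is 0x01020304, i.e. the flash address is assembled MSB first.
 */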

static void cadence_qspi_apb_read_fifo_data(void *dest,
    const void *src_ahb_addr, unsigned int bytes)
{
    unsigned int temp;
    int remaining = bytes;
    unsigned int *dest_ptr = (unsigned int *)dest;
    unsigned int *src_ptr = (unsigned int *)src_ahb_addr;

    while (remaining >= 4) {
        *dest_ptr = readl(src_ptr);
        remaining -= 4;
        dest_ptr++;
    }
    if (remaining) {
        /* dangling bytes */
        temp = readl(src_ptr);
        sys_memcpy(dest_ptr, &temp, remaining);
    }

    return;
}
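
/*
 * Illustrative example for cadence_qspi_apb_read_fifo_data() above (added
 * comment, not in the original source): a 10-byte read performs two full
 * 32-bit readl() transfers, then one more readl() whose low 2 bytes are
 * copied out with sys_memcpy() as the dangling tail.
 */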

static void cadence_qspi_apb_write_fifo_data(const void *dest_ahb_addr,
    const void *src, unsigned int bytes)
{
    unsigned int temp = 0;
    int i;
    int remaining = bytes;
    unsigned int *dest_ptr = (unsigned int *)dest_ahb_addr;
    unsigned int *src_ptr = (unsigned int *)src;

    //uart_printf("src_ptr = 0x%x, src = 0x%x\r\n", src_ptr, src);
    //uart_printf("*src_ptr = 0x%x\r\n", *src_ptr);
    while (remaining >= CQSPI_FIFO_WIDTH) {
        for (i = CQSPI_FIFO_WIDTH/4 - 1; i >= 0; i--)
            writel(*(src_ptr + i), dest_ptr + i);
        //dest_ptr += CQSPI_FIFO_WIDTH/sizeof(src_ptr);
        src_ptr += CQSPI_FIFO_WIDTH/4;
        remaining -= CQSPI_FIFO_WIDTH;
    }
    if (remaining) {
        /* dangling bytes */
        i = remaining/4;
        sys_memcpy(&temp, src_ptr + i, remaining % 4);
        writel(temp, dest_ptr + i);
        for (--i; i >= 0; i--)
            writel(*(src_ptr + i), dest_ptr + i);
    }

    return;
}

/* Read from SRAM FIFO with polling SRAM fill level. */
static int qspi_read_sram_fifo_poll(const void *reg_base, void *dest_addr,
    const void *src_addr, unsigned int num_bytes)
{
    unsigned int remaining = num_bytes;
    unsigned int retry;
    unsigned int sram_level = 0;
    unsigned char *dest = (unsigned char *)dest_addr;

    while (remaining > 0) {
        retry = CQSPI_REG_RETRY;
        while (retry--) {
            sram_level = CQSPI_GET_RD_SRAM_LEVEL((u32)reg_base);
            if (sram_level)
                break;
            delay(100);
        }
        if (!retry) {
            //uart_printf("QSPI: No receive data after polling for %d times\n",
            //    CQSPI_REG_RETRY);
            printk("fifo_poll timeout.\n");
            return -1;
        }
        sram_level *= CQSPI_FIFO_WIDTH;
        sram_level = sram_level > remaining ? remaining : sram_level;
        /* Read data from FIFO. */
        cadence_qspi_apb_read_fifo_data(dest, src_addr, sram_level);
        dest += sram_level;
        remaining -= sram_level;
        delay(100);
    }

    return 0;
}

/* Write to SRAM FIFO with polling SRAM fill level. */
static int qpsi_write_sram_fifo_push(struct cadence_spi_platdata *plat,
    const void *src_addr, unsigned int num_bytes)
{
    const void *reg_base = plat->regbase;
    void *dest_addr = plat->ahbbase;
    unsigned int retry = CQSPI_REG_RETRY;
    unsigned int sram_level;
    unsigned int wr_bytes;
    unsigned char *src = (unsigned char *)src_addr;
    int remaining = num_bytes;
    unsigned int page_size = plat->page_size;
    unsigned int sram_threshold_words = CQSPI_REG_SRAM_THRESHOLD_WORDS;

    while (remaining > 0) {
        retry = CQSPI_REG_RETRY;
        while (retry--) {
            sram_level = CQSPI_GET_WR_SRAM_LEVEL((u32)reg_base);
            if (sram_level <= sram_threshold_words)
                break;
        }
        if (!retry) {
            //uart_printf("QSPI: SRAM fill level (0x%08x) not hit lower expected level (0x%08x)",
            //    sram_level, sram_threshold_words);
            return -1;
        }
        /* Write a page or remaining bytes. */
        wr_bytes = (remaining > page_size) ?
            page_size : remaining;
        cadence_qspi_apb_write_fifo_data(dest_addr, src, wr_bytes);
        src += wr_bytes;
        remaining -= wr_bytes;
    }

    return 0;
}

void cadence_qspi_apb_controller_enable(void *reg_base)
{
    unsigned int reg;

    reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
    reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
    writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);
    return;
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
    unsigned int reg;

    reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
    reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
    writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);
    return;
}

/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
    unsigned int start = 5000, count = 0;
    /* timeout in unit of ms */
    unsigned int timeout = 5000;

#if 0
    start = get_timer(0);
    for ( ; get_timer(start) < timeout ; ) {
        if (CQSPI_REG_IS_IDLE((u32)reg_base))
            count++;
        else
            count = 0;
        /*
         * Ensure the QSPI controller is in true idle state after
         * reading back the same idle status consecutively
         */
        if (count >= CQSPI_POLL_IDLE_RETRY) {
            //uart_printf("count = %d, get_timer(start) = %d\n, ", count, get_timer(start));
            return 1;
        }
    }
#endif
    while (1) {
        if (CQSPI_REG_IS_IDLE((u32)reg_base)) {
            return 1;
        } else {
            count++;
            //return 0;
        }
        /*
         * Ensure the QSPI controller is in true idle state after
         * reading back the same idle status consecutively
         */
        if (count >= CQSPI_REG_RETRY) {
            //uart_printf("count = %d\r\n, ", count);
            return 1;
        }
    }

    /* Timeout, still in busy mode. */
    //uart_printf("QSPI: QSPI is still busy after poll for %d times.\n",
    //    CQSPI_REG_RETRY);
    return 0;
}

void cadence_qspi_apb_readdata_capture(void *reg_base,
    unsigned int bypass, unsigned int delay)
{
    unsigned int reg;

    cadence_qspi_apb_controller_disable(reg_base);

    reg = readl((u32)reg_base + CQSPI_READLCAPTURE);
    if (bypass)
        reg |= (1 << CQSPI_READLCAPTURE_BYPASS_LSB);
    else
        reg &= ~(1 << CQSPI_READLCAPTURE_BYPASS_LSB);

    reg &= ~(CQSPI_READLCAPTURE_DELAY_MASK
        << CQSPI_READLCAPTURE_DELAY_LSB);
    reg |= ((delay & CQSPI_READLCAPTURE_DELAY_MASK)
        << CQSPI_READLCAPTURE_DELAY_LSB);

    writel(reg, (u32)reg_base + CQSPI_READLCAPTURE);
    //writel(0x21, (u32)reg_base + CQSPI_READLCAPTURE);

    cadence_qspi_apb_controller_enable(reg_base);
    return;
}

void cadence_qspi_apb_config_baudrate_div(void *reg_base,
    unsigned int ref_clk_hz, unsigned int sclk_hz)
{
    unsigned int reg;
    unsigned int div;

    cadence_qspi_apb_controller_disable(reg_base);
    reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
    reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

    /*
     * The baud_div field in the config reg is 4 bits, and the ref clock is
     * divided by 2 * (baud_div + 1). Round up the divider to ensure the
     * SPI clock rate is less than or equal to the requested clock rate.
     */
    div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;

    /* ensure the baud rate doesn't exceed the max value */
    if (div > CQSPI_REG_CONFIG_BAUD_MASK)
        div = CQSPI_REG_CONFIG_BAUD_MASK;
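
    /*
     * Worked example (illustrative numbers, not from the original source):
     * with ref_clk_hz = 400000000 and sclk_hz = 25000000,
     * div = DIV_ROUND_UP(400000000, 50000000) - 1 = 7, so the actual SCLK
     * is 400 MHz / (2 * (7 + 1)) = 25 MHz. The 4-bit field caps the total
     * division at ref_clk / 32.
     */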

#if 0
    div = ref_clk_hz / sclk_hz;
    if (div > 32)
        div = 32;
    /* Check if even number. */
    if ((div & 1)) {
        div = (div / 2);
    } else {
        if (ref_clk_hz % sclk_hz)
            /* ensure generated SCLK doesn't exceed user
               specified sclk_hz */
            div = (div / 2);
        else
            div = (div / 2) - 1;
    }
#endif
    // printk("%s: ref_clk %dHz sclk %dHz Div 0x%x actual:%dHz\n", __func__,
    //    ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));

    div = (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
    reg |= div;
    writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

    cadence_qspi_apb_controller_enable(reg_base);
    return;
}

void cadence_qspi_apb_set_clk_mode(void *reg_base,
    unsigned int clk_pol, unsigned int clk_pha)
{
    unsigned int reg;

    cadence_qspi_apb_controller_disable(reg_base);
    reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
    /* Clear both the polarity and phase bits before setting them. */
    reg &= ~((1 << CQSPI_REG_CONFIG_CLK_POL_LSB) |
        (1 << CQSPI_REG_CONFIG_CLK_PHA_LSB));
    reg |= ((clk_pol & 0x1) << CQSPI_REG_CONFIG_CLK_POL_LSB);
    reg |= ((clk_pha & 0x1) << CQSPI_REG_CONFIG_CLK_PHA_LSB);
    writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

    cadence_qspi_apb_controller_enable(reg_base);
    return;
}

void cadence_qspi_apb_chipselect(void *reg_base,
    unsigned int chip_select, unsigned int decoder_enable)
{
    unsigned int reg;

    cadence_qspi_apb_controller_disable(reg_base);
    reg = readl((u32)reg_base + CQSPI_REG_CONFIG);
    /* decoder */
    if (decoder_enable) {
        reg |= CQSPI_REG_CONFIG_DECODE_MASK;
    } else {
        reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
        /* Convert CS if without decoder.
         * CS0 to 4b'1110
         * CS1 to 4b'1101
         * CS2 to 4b'1011
         * CS3 to 4b'0111
         */
        chip_select = 0xF & ~(1 << chip_select);
    }
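
    /*
     * Example (added for illustration, not in the original source): without
     * the decoder, chip_select = 1 becomes 0xF & ~(1 << 1) = 0xD, i.e.
     * 4b'1101, matching the table above.
     */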
    reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
        << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
    reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
        << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
    writel(reg, (u32)reg_base + CQSPI_REG_CONFIG);

    cadence_qspi_apb_controller_enable(reg_base);
    return;
}

void cadence_qspi_apb_delay(void *reg_base,
    unsigned int ref_clk, unsigned int sclk_hz,
    unsigned int tshsl_ns, unsigned int tsd2d_ns,
    unsigned int tchsh_ns, unsigned int tslch_ns)
{
    unsigned int ref_clk_ns;
    unsigned int sclk_ns;
    unsigned int tshsl, tchsh, tslch, tsd2d;
    unsigned int reg;

    cadence_qspi_apb_controller_disable(reg_base);

    /* Convert to ns. */
    ref_clk_ns = (10000000) / ref_clk;
    /* Convert to ns. */
    sclk_ns = (10000000) / sclk_hz;

#if 0
    /* Plus 1 to round up 1 clock cycle. */
    tshsl = CQSPI_CAL_DELAY(tshsl_ns, ref_clk_ns, sclk_ns) + 1;
    tchsh = CQSPI_CAL_DELAY(tchsh_ns, ref_clk_ns, sclk_ns) + 1;
    tslch = CQSPI_CAL_DELAY(tslch_ns, ref_clk_ns, sclk_ns) + 1;
    tsd2d = CQSPI_CAL_DELAY(tsd2d_ns, ref_clk_ns, sclk_ns) + 1;
#endif
    tshsl = 1;
    tchsh = 1;
    tslch = 1;
    tsd2d = 1;

    reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
        << CQSPI_REG_DELAY_TSHSL_LSB);
    reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
        << CQSPI_REG_DELAY_TCHSH_LSB);
    reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
        << CQSPI_REG_DELAY_TSLCH_LSB);
    reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
        << CQSPI_REG_DELAY_TSD2D_LSB);
    writel(reg, (u32)reg_base + CQSPI_REG_DELAY);
    //writel(0x0, (u32)reg_base + CQSPI_REG_DELAY);

    cadence_qspi_apb_controller_enable(reg_base);
    return;
}

void cadence_qspi_apb_controller_init(struct cadence_spi_platdata *plat)
{
    unsigned reg;

    cadence_qspi_apb_controller_disable(plat->regbase);

    /* Configure the device size and address bytes */
    reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
    /* Clear the previous value */
    reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
    reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
    reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
    reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
    writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);

    /* Configure the remap address register, no remap */
    writel(0, (u32)plat->regbase + CQSPI_REG_REMAP);
    //writel(0x8, (u32)plat->regbase + CQSPI_REG_INDIRECTTRI_ADDR_RANGE);

    /* Indirect mode configurations */
    writel((plat->sram_size/2), (u32)plat->regbase + CQSPI_REG_SRAMPARTITION);

    /* Disable all interrupts */
    writel(0, (u32)plat->regbase + CQSPI_REG_IRQMASK);

    cadence_qspi_apb_controller_enable(plat->regbase);
    return;
}

static int cadence_qspi_apb_exec_flash_cmd(u32 reg_base,
    unsigned int reg)
{
    unsigned int retry = CQSPI_REG_RETRY;

    /* Write the CMDCTRL without starting execution. */
    writel(reg, (u32)reg_base + CQSPI_REG_CMDCTRL);
    /* Start execute */
    reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
    writel(reg, (u32)reg_base + CQSPI_REG_CMDCTRL);

    while (retry--) {
        reg = readl((u32)reg_base + CQSPI_REG_CMDCTRL);
        if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS_MASK) == 0)
            break;
        delay(1000);
    }
    if (!retry) {
        //uart_printf("QSPI: flash command execution timeout\n");
        return -1;
    }

    /* Polling QSPI idle status. */
    if (!cadence_qspi_wait_idle((void *)reg_base))
        return -1;

    return 0;
}

/* For commands: RDID, RDSR. */
int cadence_qspi_apb_command_read(void *reg_base,
    unsigned int cmdlen, const u8 *cmdbuf, unsigned int rxlen,
    u8 *rxbuf)
{
    unsigned int reg;
    unsigned int read_len;
    int status;

    if (!cmdlen || rxlen > CQSPI_STIG_DATA_LEN_MAX || rxbuf == NULL) {
        //uart_printf("QSPI: Invalid input arguments cmdlen %d rxlen %d\n",
        //    cmdlen, rxlen);
        return -1;
    }

    reg = cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
    reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
    /* 0 means 1 byte. */
    reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
        << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
    status = cadence_qspi_apb_exec_flash_cmd((u32)reg_base, reg);
    if (status != 0)
        return status;

    reg = readl((u32)reg_base + CQSPI_REG_CMDREADDATALOWER);

    /* Put the read value into rx_buf */
    read_len = (rxlen > 4) ? 4 : rxlen;
    sys_memcpy(rxbuf, &reg, read_len);
    rxbuf += read_len;

    if (rxlen > 4) {
        reg = readl((u32)reg_base + CQSPI_REG_CMDREADDATAUPPER);
        read_len = rxlen - read_len;
        sys_memcpy(rxbuf, &reg, read_len);
    }

    return 0;
}

/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(void *reg_base, unsigned int cmdlen,
    const u8 *cmdbuf, unsigned int txlen, const u8 *txbuf)
{
    unsigned int reg = 0;
    unsigned int addr_value;
    unsigned int wr_data;
    unsigned int wr_len;

    if (!cmdlen || cmdlen > 5 || txlen > 8 || cmdbuf == NULL) {
        //uart_printf("QSPI: Invalid input arguments cmdlen %d txlen %d\n",
        //    cmdlen, txlen);
        return -1;
    }

    reg |= cmdbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;

    if (cmdlen == 4 || cmdlen == 5) {
        /* Command with address */
        reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
        /* Number of address bytes, encoded as (n - 1). */
        reg |= ((cmdlen - 2) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
            << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
        /* Get address */
        addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1],
            cmdlen >= 5 ? 4 : 3);
        writel(addr_value, (u32)reg_base + CQSPI_REG_CMDADDRESS);
    }
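
    /*
     * Example (illustrative, not in the original source): a 4-byte erase
     * command buffer {opcode, a2, a1, a0} sets ADDR_EN, programs
     * ADD_BYTES = cmdlen - 2 = 2 (i.e. a 3-byte address), and writes the
     * packed address to CQSPI_REG_CMDADDRESS.
     */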

    if (txlen) {
        /* writing data = yes */
        reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
        reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
            << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

        wr_len = txlen > 4 ? 4 : txlen;
        sys_memcpy(&wr_data, txbuf, wr_len);
        writel(wr_data, (u32)reg_base +
            CQSPI_REG_CMDWRITEDATALOWER);

        if (txlen > 4) {
            txbuf += wr_len;
            wr_len = txlen - wr_len;
            sys_memcpy(&wr_data, txbuf, wr_len);
            writel(wr_data, (u32)reg_base +
                CQSPI_REG_CMDWRITEDATAUPPER);
        }
    }

    /* Execute the command */
    return cadence_qspi_apb_exec_flash_cmd((u32)reg_base, reg);
}

/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_indirect_read_setup(struct cadence_spi_platdata *plat,
    unsigned int cmdlen, const u8 *cmdbuf)
{
    unsigned int reg;
    unsigned int rd_reg;
    unsigned int addr_value;
    unsigned int dummy_clk;
    unsigned int dummy_bytes;
    unsigned int addr_bytes;

    /*
     * Identify addr_bytes. All NOR flash device drivers use fast read,
     * which always expects 1 cmd byte, 3/4 address bytes and 1 dummy
     * byte, so cmdlen is 5 or 6. Only the FRAM chips from Ramtron use
     * normal read (which needs no dummy byte); NOR flash is unlikely to
     * use normal read because of the performance penalty.
     */
    if (cmdlen >= 5)
        /* to cater for fast read: cmd + addr + dummy */
        addr_bytes = cmdlen - 2;
    else
        /* for normal read (only Ramtron as of now) */
        addr_bytes = cmdlen - 1;
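
    /*
     * Worked example (added for illustration, not in the original source):
     * a quad-output fast read, opcode 0x6B with a 3-byte address and one
     * dummy/mode byte, arrives here as cmdlen = 5, so addr_bytes = 3 and
     * the dummy_bytes computed below is 1.
     */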

    /* Setup the indirect trigger address */
    writel(((u32)plat->ahbbase & CQSPI_INDIRECTTRIGGER_ADDR_MASK),
        (u32)plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

    /* Configure the opcode */
    rd_reg = cmdbuf[0] << CQSPI_REG_RD_INSTR_OPCODE_LSB;
    if (plat->bit_mode == 4) {
        /* Instruction and address at DQ0, data at DQ0-3. */
        rd_reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
    } else {
        rd_reg &= ~(CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB);
    }

    /* Get address */
    addr_value = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
    writel(addr_value, (u32)plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

    /* The remaining length is dummy bytes. */
    dummy_bytes = cmdlen - addr_bytes - 1;
    if (dummy_bytes) {
        if (dummy_bytes > CQSPI_DUMMY_BYTES_MAX)
            dummy_bytes = CQSPI_DUMMY_BYTES_MAX;

        rd_reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
#if defined(CONFIG_SPL_SPI_XIP) && defined(CONFIG_SPL_BUILD)
        writel(0x0, plat->regbase + CQSPI_REG_MODE_BIT);
#else
        writel(0xFF, (u32)plat->regbase + CQSPI_REG_MODE_BIT);
#endif
        /* Convert to clock cycles. */
        dummy_clk = dummy_bytes * CQSPI_DUMMY_CLKS_PER_BYTE;
        /* Need to subtract the mode byte (8 clocks). */
        dummy_clk -= CQSPI_DUMMY_CLKS_PER_BYTE;
        if (dummy_clk)
            rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
                << CQSPI_REG_RD_INSTR_DUMMY_LSB;
    }

    writel(rd_reg, (u32)plat->regbase + CQSPI_REG_RD_INSTR);
    //writel(0x0012006b, (u32)plat->regbase + CQSPI_REG_RD_INSTR);
    //writel(0x041220eb, (u32)plat->regbase + CQSPI_REG_RD_INSTR);

    /* set device size */
    reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
    reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
    reg |= (addr_bytes - 1);
    writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);
    return 0;
}

int cadence_qspi_apb_indirect_read_execute(struct cadence_spi_platdata *plat,
    unsigned int rxlen, u8 *rxbuf)
{
    unsigned int reg;

    writel(rxlen, (u32)plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

    /* Start the indirect read transfer */
    writel(CQSPI_REG_INDIRECTRD_START_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTRD);

    if (qspi_read_sram_fifo_poll(plat->regbase, (void *)rxbuf,
        (const void *)plat->ahbbase, rxlen))
        goto failrd;

    /* Check flash indirect controller */
    reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTRD);
    if (!(reg & CQSPI_REG_INDIRECTRD_DONE_MASK)) {
        reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTRD);
        //uart_printf("QSPI read_execute: indirect completion status error with reg 0x%x\n",
        //    reg);
        goto failrd;
    }

    /* Clear indirect completion status */
    writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTRD);
    return 0;

failrd:
    /* Cancel the indirect read */
    writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTRD);
    return -1;
}

/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_indirect_write_setup(struct cadence_spi_platdata *plat,
    unsigned int cmdlen, const u8 *cmdbuf)
{
    unsigned int reg;
    unsigned int addr_bytes = cmdlen > 4 ? 4 : 3;

    if (cmdlen < 4 || cmdbuf == NULL) {
        //uart_printf("QSPI: Invalid input argument, len %d cmdbuf 0x%x\n",
        //    cmdlen, (unsigned int)cmdbuf);
        return -1;
    }

    /* Setup the indirect trigger address */
    writel(((u32)plat->ahbbase & CQSPI_INDIRECTTRIGGER_ADDR_MASK),
        (u32)plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

    /* Configure the opcode */
    reg = cmdbuf[0] << CQSPI_REG_WR_INSTR_OPCODE_LSB;
    if (plat->bit_mode == 4) {
        /* Instruction and address at DQ0, data at DQ0-3. */
        reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
    } else {
        reg &= ~(CQSPI_INST_TYPE_QUAD << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB);
    }
    writel(reg, (u32)plat->regbase + CQSPI_REG_WR_INSTR);
    //writel(0x00020032, (u32)plat->regbase + CQSPI_REG_WR_INSTR);

    /* Setup write address. */
    reg = cadence_qspi_apb_cmd2addr(&cmdbuf[1], addr_bytes);
    writel(reg, (u32)plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

    reg = readl((u32)plat->regbase + CQSPI_REG_SIZE);
    reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
    reg |= (addr_bytes - 1);
    writel(reg, (u32)plat->regbase + CQSPI_REG_SIZE);
    return 0;
}

int cadence_qspi_apb_indirect_write_execute(struct cadence_spi_platdata *plat,
    unsigned int txlen, const u8 *txbuf)
{
    unsigned int reg = 0;
    unsigned int retry;

    /* Configure the indirect write transfer bytes */
    writel(txlen, (u32)plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

    /* Start the indirect write transfer */
    writel(CQSPI_REG_INDIRECTWR_START_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTWR);

    if (qpsi_write_sram_fifo_push(plat, (const void *)txbuf, txlen))
        goto failwr;

#if 1
    /* Wait until the last write is completed (FIFO empty) */
    retry = CQSPI_REG_RETRY;
    while (retry--) {
        reg = CQSPI_GET_WR_SRAM_LEVEL((u32)plat->regbase);
        if (reg == 0)
            break;
        delay(1000);
    }
    if (reg != 0) {
        //uart_printf("QSPI: timeout for indirect write\n");
        goto failwr;
    }

    /* Check flash indirect controller status */
    retry = CQSPI_REG_RETRY;
    while (retry--) {
        reg = readl((u32)plat->regbase + CQSPI_REG_INDIRECTWR);
        if (reg & CQSPI_REG_INDIRECTWR_DONE_MASK)
            break;
        delay(1000);
    }
    if (!(reg & CQSPI_REG_INDIRECTWR_DONE_MASK)) {
        //uart_printf("QSPI write_execute: indirect completion status error with reg 0x%x\n",
        //    reg);
        goto failwr;
    }

    /* Clear indirect completion status */
    writel(CQSPI_REG_INDIRECTWR_DONE_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTWR);
#endif
    return 0;

failwr:
    /* Cancel the indirect write */
    writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
        (u32)plat->regbase + CQSPI_REG_INDIRECTWR);
    return -1;
}