// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI-NOR controller driver
//
// Copyright (C) 2020 SkyLake Huang <SkyLake.Huang@mediatek.com>
//
// Some parts are based on drivers/spi/spi-mtk-nor.c in the Linux kernel.
#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/pinctrl.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <spi.h>
#include <spi-mem.h>
#include <stdbool.h>
#include <watchdog.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD 0x00
#define MTK_NOR_CMD_WRSR BIT(5)
#define MTK_NOR_CMD_WRITE BIT(4)
#define MTK_NOR_CMD_PROGRAM BIT(2)
#define MTK_NOR_CMD_RDSR BIT(1)
#define MTK_NOR_CMD_READ BIT(0)
#define MTK_NOR_CMD_MASK GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT 0x04
#define MTK_NOR_REG_RDSR 0x08
#define MTK_NOR_REG_RDATA 0x0c

#define MTK_NOR_REG_RADR0 0x10
#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3 0xc8

#define MTK_NOR_REG_WDATA 0x1c

#define MTK_NOR_REG_PRGDATA0 0x20
#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX 5

#define MTK_NOR_REG_SHIFT0 0x38
#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX 9

#define MTK_NOR_REG_CFG1 0x60
#define MTK_NOR_FAST_READ BIT(0)

#define MTK_NOR_REG_CFG2 0x64
#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
#define MTK_NOR_WR_BUF_EN BIT(0)

#define MTK_NOR_REG_PP_DATA 0x98

#define MTK_NOR_REG_IRQ_STAT 0xa8
#define MTK_NOR_REG_IRQ_EN 0xac
#define MTK_NOR_IRQ_DMA BIT(7)
#define MTK_NOR_IRQ_WRSR BIT(5)
#define MTK_NOR_IRQ_MASK GENMASK(7, 0)

#define MTK_NOR_REG_CFG3 0xb4
#define MTK_NOR_DISABLE_WREN BIT(7)
#define MTK_NOR_DISABLE_SR_POLL BIT(5)

#define MTK_NOR_REG_WP 0xc4
#define MTK_NOR_ENABLE_SF_CMD 0x30

#define MTK_NOR_REG_BUSCFG 0xcc
#define MTK_NOR_4B_ADDR BIT(4)
#define MTK_NOR_QUAD_ADDR BIT(3)
#define MTK_NOR_QUAD_READ BIT(2)
#define MTK_NOR_DUAL_ADDR BIT(1)
#define MTK_NOR_DUAL_READ BIT(0)
#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL 0x718
#define MTK_NOR_DMA_START BIT(0)
#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724

#define MTK_NOR_PRG_MAX_SIZE 6

// DMA src/dst addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN 16
#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128

#define CLK_TO_US(priv, clkcnt) DIV_ROUND_UP(clkcnt, (priv)->spi_freq / 1000000)

#define MTK_NOR_UNLOCK_ALL 0x0
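
/*
 * Driver private data: register base, the DMA bounce buffer, the "spi"
 * (bus) and "sf" (controller) clocks, the cached SPI clock rate used for
 * timeout calculation, and the cached write-buffer-enable state.
 */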
struct mtk_snor_priv {
	struct udevice *dev;
	void __iomem *base;
	u8 *buffer;
	struct clk spi_clk;
	struct clk ctlr_clk;
	unsigned int spi_freq;
	bool wbuf_en;
};
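
/*
 * Read-modify-write a controller register: set the bits in @set and
 * clear the bits in @clr.
 */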
static inline void mtk_snor_rmw(struct mtk_snor_priv *priv, u32 reg, u32 set,
				u32 clr)
{
	u32 val = readl(priv->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, priv->base + reg);
}
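
/*
 * Trigger a command in the CMD register and poll until the controller
 * clears the command bit again. @clk is the transfer length in SPI clock
 * cycles and is used to scale the polling timeout.
 */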
static inline int mtk_snor_cmd_exec(struct mtk_snor_priv *priv, u32 cmd,
				    ulong clk)
{
	unsigned long long delay = CLK_TO_US(priv, clk);
	u32 reg;
	int ret;

	writel(cmd, priv->base + MTK_NOR_REG_CMD);
	delay = (delay + 1) * 200;
	ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CMD, reg,
				 !(reg & cmd), delay);
	if (ret < 0)
		dev_err(priv->dev, "command %u timeout.\n", cmd);
	return ret;
}
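
/*
 * Program the flash address into the RADR registers. RADR0-RADR2 hold
 * the low three bytes; RADR3 takes the extra byte of a 4-byte address,
 * in which case 4-byte addressing is also enabled in BUSCFG.
 */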
static void mtk_snor_set_addr(struct mtk_snor_priv *priv,
			      const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR3);
		mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}
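
/*
 * Clamp the data size of an operation to what the controller can do in
 * one go: DMA reads are capped at 4 MiB to keep the timeout calculation
 * from overflowing and must respect the 16-byte alignment rules
 * (unaligned cases fall back to single-byte PIO or the bounce buffer),
 * while writes are capped at the 128-byte program buffer.
 */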
static int mtk_snor_adjust_op_size(struct spi_slave *slave,
				   struct spi_mem_op *op)
{
	if (!op->data.nbytes)
		return 0;

	if (op->addr.nbytes == 3 || op->addr.nbytes == 4) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;
			if (op->addr.val & MTK_NOR_DMA_ALIGN_MASK ||
			    op->data.nbytes < MTK_NOR_DMA_ALIGN)
				op->data.nbytes = 1;
			else if (!need_bounce(op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	return 0;
}

static bool mtk_snor_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	/* This controller only supports 1-1-1 write mode */
	if (op->data.dir == SPI_MEM_DATA_OUT &&
	    (op->cmd.buswidth != 1 || op->data.buswidth != 1))
		return false;

	return true;
}
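
/*
 * Configure the bus mode for a read: select dual/quad I/O according to
 * the operation's bus widths and latch the opcode into the matching
 * PRGDATA register. Plain single-lane reads only toggle fast-read mode
 * (opcode 0x0b).
 */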
static void mtk_snor_setup_bus(struct mtk_snor_priv *priv,
			       const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ,
				     0);
		else
			mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, 0,
				     MTK_NOR_FAST_READ);
	}

	mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
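
/*
 * Start a DMA read of @length bytes from flash offset @from into
 * @dma_addr and poll until the controller clears the start bit. The
 * timeout is derived from the expected transfer time on the SPI bus.
 */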
static int mtk_snor_dma_exec(struct mtk_snor_priv *priv, u32 from,
			     unsigned int length, dma_addr_t dma_addr)
{
	int ret = 0;
	ulong delay;
	u32 reg;

	writel(from, priv->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, priv->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, priv->base + MTK_NOR_REG_DMA_END_DADR);

	mtk_snor_rmw(priv, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(priv, (length + 5) * BITS_PER_BYTE);
	delay = (delay + 1) * 100;
	ret = readl_poll_timeout(priv->base + MTK_NOR_REG_DMA_CTL, reg,
				 !(reg & MTK_NOR_DMA_START), delay);
	if (ret < 0)
		dev_err(priv->dev, "dma read timeout.\n");

	return ret;
}
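
/*
 * DMA read via the 16-byte-aligned bounce buffer, for destinations that
 * are not DMA-aligned. The read length is rounded up to the alignment
 * and only the requested bytes are copied out afterwards.
 */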
static int mtk_snor_read_bounce(struct mtk_snor_priv *priv,
				const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) &
			~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_snor_dma_exec(priv, op->addr.val, rdlen,
				(dma_addr_t)priv->buffer);
	if (!ret)
		memcpy(op->data.buf.in, priv->buffer, op->data.nbytes);

	return ret;
}
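
/*
 * DMA read entry point: map the destination buffer for DMA if it is
 * suitably aligned, otherwise go through the bounce buffer.
 */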
static int mtk_snor_read_dma(struct mtk_snor_priv *priv,
			     const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(op))
		return mtk_snor_read_bounce(priv, op);

	dma_addr = dma_map_single(op->data.buf.in, op->data.nbytes,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->dev, dma_addr))
		return -EINVAL;

	ret = mtk_snor_dma_exec(priv, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}
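
/* Single-byte PIO read, for transfers that were clamped to one byte. */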
static int mtk_snor_read_pio(struct mtk_snor_priv *priv,
			     const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(priv->base + MTK_NOR_REG_RDATA);
	return ret;
}
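
/*
 * Enable/disable the controller's 128-byte program buffer. The state is
 * cached in priv->wbuf_en so the register is only touched on changes.
 */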
static int mtk_snor_write_buffer_enable(struct mtk_snor_priv *priv)
{
	int ret;
	u32 val;

	if (priv->wbuf_en)
		return 0;

	val = readl(priv->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 10000);
	if (!ret)
		priv->wbuf_en = true;

	return ret;
}

static int mtk_snor_write_buffer_disable(struct mtk_snor_priv *priv)
{
	int ret;
	u32 val;

	if (!priv->wbuf_en)
		return 0;

	val = readl(priv->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 10000);
	if (!ret)
		priv->wbuf_en = false;

	return ret;
}
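
/*
 * Buffered page program: fill the 128-byte program buffer one word at a
 * time, then trigger a buffered WRITE command.
 */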
static int mtk_snor_pp_buffered(struct mtk_snor_priv *priv,
				const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_snor_write_buffer_enable(priv);
	if (ret < 0)
		return ret;

	/* Assemble each 32-bit word little-endian from the byte stream. */
	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, priv->base + MTK_NOR_REG_PP_DATA);
	}

	ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
	if (ret < 0)
		return ret;

	return mtk_snor_write_buffer_disable(priv);
}

static int mtk_snor_pp_unbuffered(struct mtk_snor_priv *priv,
				  const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_snor_write_buffer_disable(priv);
	if (ret < 0)
		return ret;
	writeb(buf[0], priv->base + MTK_NOR_REG_WDATA);
	return mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
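
/*
 * Generic command execution through the PRGDATA/SHIFT register file:
 * opcode, address, dummy and TX data bytes are packed MSB-first into the
 * PRGDATA registers, the PROGRAM command shifts them out, and any RX
 * bytes are collected from the SHIFT registers.
 */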
static int mtk_snor_cmd_program(struct mtk_snor_priv *priv,
				const struct spi_mem_op *op)
{
	u32 tx_len = 0;
	u32 trx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	u8 *txbuf;
	int tx_cnt = 0;
	u8 *rxbuf = op->data.buf.in;
	int i = 0;

	tx_len = 1 + op->addr.nbytes + op->dummy.nbytes;
	trx_len = tx_len + op->data.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->data.nbytes;

	txbuf = kmalloc_array(tx_len, sizeof(u8), GFP_KERNEL);
	if (!txbuf)
		return -ENOMEM;
	memset(txbuf, 0x0, tx_len * sizeof(u8));

	/* Join all bytes to be transferred */
	txbuf[tx_cnt] = op->cmd.opcode;
	tx_cnt++;
	for (i = op->addr.nbytes; i > 0; i--, tx_cnt++)
		txbuf[tx_cnt] = ((u8 *)&op->addr.val)[i - 1];
	for (i = op->dummy.nbytes; i > 0; i--, tx_cnt++)
		txbuf[tx_cnt] = 0x0;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		for (i = op->data.nbytes; i > 0; i--, tx_cnt++)
			txbuf[tx_cnt] = ((u8 *)op->data.buf.out)[i - 1];

	for (i = MTK_NOR_REG_PRGDATA_MAX; i >= 0; i--)
		writeb(0, priv->base + MTK_NOR_REG_PRGDATA(i));

	for (i = 0; i < tx_len; i++, reg_offset--)
		writeb(txbuf[i], priv->base + MTK_NOR_REG_PRGDATA(reg_offset));

	kfree(txbuf);

	writel(trx_len * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);

	mtk_snor_cmd_exec(priv, MTK_NOR_CMD_PROGRAM, trx_len * BITS_PER_BYTE);

	reg_offset = op->data.nbytes - 1;
	for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
		reg = priv->base + MTK_NOR_REG_SHIFT(reg_offset);
		rxbuf[i] = readb(reg);
	}

	return 0;
}
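
/*
 * Dispatch a spi-mem operation: register-file programming for plain
 * commands, (un)buffered page program for writes, and PIO or DMA for
 * reads.
 */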
static int mtk_snor_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_snor_priv *priv = dev_get_priv(bus);
	int ret;

	if (op->data.dir == SPI_MEM_NO_DATA || op->addr.nbytes == 0) {
		return mtk_snor_cmd_program(priv, op);
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_snor_set_addr(priv, op);
		writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_snor_pp_buffered(priv, op);
		return mtk_snor_pp_unbuffered(priv, op);
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		ret = mtk_snor_write_buffer_disable(priv);
		if (ret < 0)
			return ret;
		mtk_snor_setup_bus(priv, op);
		if (op->data.nbytes == 1) {
			mtk_snor_set_addr(priv, op);
			return mtk_snor_read_pio(priv, op);
		} else {
			return mtk_snor_read_dma(priv, op);
		}
	}

	return -ENOTSUPP;
}
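
/*
 * Probe: map the controller, grab and enable the "spi" and "sf" clocks,
 * carve out a DMA-aligned bounce buffer, enable custom SPI commands and
 * issue an initial write-status command to unlock all blocks.
 */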
static int mtk_snor_probe(struct udevice *bus)
{
	struct mtk_snor_priv *priv = dev_get_priv(bus);
	u8 *buffer;
	int ret;
	u32 reg;

	priv->dev = bus;

	priv->base = (void __iomem *)devfdt_get_addr_ptr(bus);
	if (!priv->base)
		return -EINVAL;

	ret = clk_get_by_name(bus, "spi", &priv->spi_clk);
	if (ret < 0)
		return ret;

	ret = clk_get_by_name(bus, "sf", &priv->ctlr_clk);
	if (ret < 0)
		return ret;

	buffer = devm_kmalloc(bus, MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
		buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
				~MTK_NOR_DMA_ALIGN_MASK);
	priv->buffer = buffer;

	clk_enable(&priv->spi_clk);
	clk_enable(&priv->ctlr_clk);
	priv->spi_freq = clk_get_rate(&priv->spi_clk);
	printf("spi frequency: %u Hz\n", priv->spi_freq);

	/* With this setting, we issue one command at a time to
	 * accommodate the SPI-mem framework.
	 */
	writel(MTK_NOR_ENABLE_SF_CMD, priv->base + MTK_NOR_REG_WP);
	mtk_snor_rmw(priv, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_snor_rmw(priv, MTK_NOR_REG_CFG3,
		     MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);

	/* Unlock all blocks using the write status command.
	 * SPI-MEM hasn't implemented an unlock procedure on MXIC devices,
	 * so we may remove this later.
	 */
	writel(2 * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);
	writel(MTK_NOR_UNLOCK_ALL, priv->base + MTK_NOR_REG_PRGDATA(5));
	writel(MTK_NOR_IRQ_WRSR, priv->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_CMD_WRSR, priv->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(priv->base + MTK_NOR_REG_IRQ_STAT, reg,
				 !(reg & MTK_NOR_IRQ_WRSR),
				 ((3 * BITS_PER_BYTE) + 1) * 200);

	return 0;
}

static int mtk_snor_set_speed(struct udevice *bus, uint speed)
{
	/* MTK's SNOR controller does not have a bus clock divider.
	 * We set up the maximum bus clock in the device tree.
	 */
	return 0;
}

static int mtk_snor_set_mode(struct udevice *bus, uint mode)
{
	/* The bus mode is set up per transfer in mtk_snor_setup_bus(). */
	return 0;
}

static const struct spi_controller_mem_ops mtk_snor_mem_ops = {
	.adjust_op_size = mtk_snor_adjust_op_size,
	.supports_op = mtk_snor_supports_op,
	.exec_op = mtk_snor_exec_op,
};

static const struct dm_spi_ops mtk_snor_ops = {
	.mem_ops = &mtk_snor_mem_ops,
	.set_speed = mtk_snor_set_speed,
	.set_mode = mtk_snor_set_mode,
};

static const struct udevice_id mtk_snor_ids[] = {
	{ .compatible = "mediatek,mtk-snor" },
	{}
};

U_BOOT_DRIVER(mtk_snor) = {
	.name = "mtk_snor",
	.id = UCLASS_SPI,
	.of_match = mtk_snor_ids,
	.ops = &mtk_snor_ops,
	.priv_auto = sizeof(struct mtk_snor_priv),
	.probe = mtk_snor_probe,
};