// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY UCLASS_SPI

#include <common.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <malloc.h>
#include <pch.h>
#include <pci.h>
#include <pci_ids.h>
#include <spi.h>
#include <spi_flash.h>
#include <spi-mem.h>
#include <spl.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <linux/sizes.h>

#include "ich.h"

#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...) debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif

struct ich_spi_platdata {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_intel_fast_spi dtplat;
#endif
	enum ich_version ich_version;	/* Controller version, 7 or 9 */
	bool lockdown;			/* lock down controller settings? */
	ulong mmio_base;		/* Base of MMIO registers */
	pci_dev_t bdf;			/* PCI address used by of-platdata */
	bool hwseq;			/* Use hardware sequencing (not s/w) */
};
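
/*
 * Basic MMIO accessors for the controller registers; each access is logged
 * via debug_trace() when DEBUG_TRACE is defined
 */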
static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
	u8 value = readb(priv->base + reg);

	debug_trace("read %2.2x from %4.4x\n", value, reg);

	return value;
}

static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
	u16 value = readw(priv->base + reg);

	debug_trace("read %4.4x from %4.4x\n", value, reg);

	return value;
}

static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
	u32 value = readl(priv->base + reg);

	debug_trace("read %8.8x from %4.4x\n", value, reg);

	return value;
}

static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
	writeb(value, priv->base + reg);
	debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
	writew(value, priv->base + reg);
	debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
	writel(value, priv->base + reg);
	debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}
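
/* Copy a block of bytes to/from a run of controller registers (e.g. FDATA) */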
static void write_reg(struct ich_spi_priv *priv, const void *value,
		      int dest_reg, uint32_t size)
{
	memcpy_toio(priv->base + dest_reg, value, size);
}

static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
		     uint32_t size)
{
	memcpy_fromio(value, priv->base + src_reg, size);
}

static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
	const uint32_t bbar_mask = 0x00ffff00;
	uint32_t ichspi_bbar;

	if (ctlr->bbar) {
		minaddr &= bbar_mask;
		ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
		ichspi_bbar |= minaddr;
		ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
	}
}

/* @return true if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
	struct ich_spi_priv *priv = dev_get_priv(dev);
	u32 fdod, speed;

	if (!CONFIG_IS_ENABLED(PCI))
		return false;

	/* Observe SPI Descriptor Component Section 0 */
	dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

	/* Extract the Write/Erase SPI Frequency from descriptor */
	dm_pci_read_config32(priv->pch, 0xb4, &fdod);

	/* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
	speed = (fdod >> 21) & 7;

	return speed == 1;
}
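
/*
 * Lock down the controller settings (SPIS.LOCK on ICH7, HSFS.FLOCKDN on
 * ICH9) and report whether that lock is currently set
 */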
static void spi_lock_down(struct ich_spi_platdata *plat, void *sbase)
{
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		setbits_le16(&ich7_spi->spis, SPIS_LOCK);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
	}
}

static bool spi_lock_status(struct ich_spi_platdata *plat, void *sbase)
{
	int lock = 0;

	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = sbase;

		lock = readw(&ich7_spi->spis) & SPIS_LOCK;
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = sbase;

		lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
	}

	return lock != 0;
}
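
/*
 * spi_setup_opcode() - Program or look up the opcode for a transaction
 *
 * When the controller is unlocked, the opcode is simply written to menu
 * slot 0. When it is locked, the opcode must already be present in the
 * opcode menu with a matching transaction type.
 *
 * @return opcode-menu index to use, or -ve error code
 */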
static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
			    bool lock)
{
	uint16_t optypes;
	uint8_t opmenu[ctlr->menubytes];

	if (!lock) {
		/* The lock is off, so just use index 0. */
		ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
		optypes = ich_readw(ctlr, ctlr->optype);
		optypes = (optypes & 0xfffc) | (trans->type & 0x3);
		ich_writew(ctlr, optypes, ctlr->optype);
		return 0;
	} else {
		/* The lock is on. See if what we need is on the menu. */
		uint8_t optype;
		uint16_t opcode_index;

		/* Write Enable is handled as atomic prefix */
		if (trans->opcode == SPI_OPCODE_WREN)
			return 0;

		read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
		for (opcode_index = 0; opcode_index < ctlr->menubytes;
				opcode_index++) {
			if (opmenu[opcode_index] == trans->opcode)
				break;
		}

		if (opcode_index == ctlr->menubytes) {
			debug("ICH SPI: Opcode %x not found\n", trans->opcode);
			return -EINVAL;
		}

		optypes = ich_readw(ctlr, ctlr->optype);
		optype = (optypes >> (opcode_index * 2)) & 0x3;

		if (optype != trans->type) {
			debug("ICH SPI: Transaction doesn't fit type %d\n",
			      optype);
			return -ENOSPC;
		}
		return opcode_index;
	}
}

/*
 * Wait for up to 6s until the status register bit(s) turn 1 (if wait_til_set
 * is true) or 0. If the wait was for the bit(s) to become set, write those
 * bits back, which causes them to be reset.
 *
 * Return the last read status value on success or -ETIMEDOUT on timeout.
 */
static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
			   int wait_til_set)
{
	int timeout = 600000; /* This will result in 6s */
	u16 status = 0;

	while (timeout--) {
		status = ich_readw(ctlr, ctlr->status);
		if (wait_til_set ^ ((status & bitmask) == 0)) {
			if (wait_til_set) {
				ich_writew(ctlr, status & bitmask,
					   ctlr->status);
			}
			return status;
		}
		udelay(10);
	}
	debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
	      status, bitmask, wait_til_set, status & bitmask);

	return -ETIMEDOUT;
}

static void ich_spi_config_opcode(struct udevice *dev)
{
	struct ich_spi_priv *ctlr = dev_get_priv(dev);

	/*
	 * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
	 * to prevent accidental or intentional writes. Before they get
	 * locked down, these registers should be initialized properly.
	 */
	ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
	ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
	ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
	ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
}
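
/* Execute a SPI memory operation using the ICH7/ICH9 software sequencer */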
static int ich_spi_exec_op_swseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	struct ich_spi_priv *ctlr = dev_get_priv(bus);
	uint16_t control;
	int16_t opcode_index;
	int with_address = 0;
	int status;
	struct spi_trans *trans = &ctlr->trans;
	bool lock = spi_lock_status(plat, ctlr->base);
	int ret = 0;

	trans->in = NULL;
	trans->out = NULL;
	trans->type = 0xFF;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			trans->in = op->data.buf.in;
			trans->bytesin = op->data.nbytes;
		} else {
			trans->out = op->data.buf.out;
			trans->bytesout = op->data.nbytes;
		}
	}

	if (trans->opcode != op->cmd.opcode)
		trans->opcode = op->cmd.opcode;

	if (lock && trans->opcode == SPI_OPCODE_WRDIS)
		return 0;

	if (trans->opcode == SPI_OPCODE_WREN) {
		/*
		 * Treat Write Enable as Atomic Pre-Op if possible
		 * in order to prevent the Management Engine from
		 * issuing a transaction between WREN and DATA.
		 */
		if (!lock)
			ich_writew(ctlr, trans->opcode, ctlr->preop);
		return 0;
	}

	ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
	if (ret < 0)
		return ret;

	if (plat->ich_version == ICHV_7)
		ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
	else
		ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);

	/* Try to guess spi transaction type */
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
	} else {
		if (op->addr.nbytes)
			trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
		else
			trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
	}
	/* Special erase case handling */
	if (op->addr.nbytes && !op->data.buswidth)
		trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;

	opcode_index = spi_setup_opcode(ctlr, trans, lock);
	if (opcode_index < 0)
		return -EINVAL;

	if (op->addr.nbytes) {
		trans->offset = op->addr.val;
		with_address = 1;
	}

	if (ctlr->speed && ctlr->max_speed >= 33000000) {
		int byte;

		byte = ich_readb(ctlr, ctlr->speed);
		if (ctlr->cur_speed >= 33000000)
			byte |= SSFC_SCF_33MHZ;
		else
			byte &= ~SSFC_SCF_33MHZ;
		ich_writeb(ctlr, byte, ctlr->speed);
	}

	/* Preset control fields */
	control = SPIC_SCGO | ((opcode_index & 0x07) << 4);

	/* Issue atomic preop cycle if needed */
	if (ich_readw(ctlr, ctlr->preop))
		control |= SPIC_ACS;

	if (!trans->bytesout && !trans->bytesin) {
		/* SPI addresses are 24 bit only */
		if (with_address) {
			ich_writel(ctlr, trans->offset & 0x00FFFFFF,
				   ctlr->addr);
		}

		/*
		 * This is a 'no data' command (like Write Enable): its
		 * bytesout size was 1, decremented to zero while executing
		 * spi_setup_opcode() above. Tell the chip to send the
		 * command.
		 */
		ich_writew(ctlr, control, ctlr->control);

		/* wait for the result */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Command transaction error\n");
			return -EIO;
		}

		return 0;
	}

	while (trans->bytesout || trans->bytesin) {
		uint32_t data_length;

		/* SPI addresses are 24 bit only */
		ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);

		if (trans->bytesout)
			data_length = min(trans->bytesout, ctlr->databytes);
		else
			data_length = min(trans->bytesin, ctlr->databytes);

		/* Program data into FDATA0 to N */
		if (trans->bytesout) {
			write_reg(ctlr, trans->out, ctlr->data, data_length);
			trans->bytesout -= data_length;
		}

		/* Add proper control fields' values */
		control &= ~((ctlr->databytes - 1) << 8);
		control |= SPIC_DS;
		control |= (data_length - 1) << 8;

		/* write it */
		ich_writew(ctlr, control, ctlr->control);

		/* Wait for Cycle Done Status or Flash Cycle Error */
		status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
		if (status < 0)
			return status;

		if (status & SPIS_FCERR) {
			debug("ICH SPI: Data transaction error %x\n", status);
			return -EIO;
		}

		if (trans->bytesin) {
			read_reg(ctlr, ctlr->data, trans->in, data_length);
			trans->bytesin -= data_length;
		}
	}

	/* Clear atomic preop now that xfer is done */
	if (!lock)
		ich_writew(ctlr, 0, ctlr->preop);

	return 0;
}

/*
 * Ensure the read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE
 * and that the operation does not cross a page boundary.
 */
static uint get_xfer_len(u32 offset, int len, int page_size)
{
	uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	uint bytes_left = ALIGN(offset, page_size) - offset;

	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}

/* Fill FDATAn FIFO in preparation for a write transaction */
static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
			   uint len)
{
	memcpy(regs->fdata, data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data */
static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
{
	memcpy(dest, regs->fdata, len);
}

/* Fire up a transfer using the hardware sequencer */
static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
			     uint offset, uint len)
{
	/* Make sure all W1C status bits get cleared */
	u32 hsfsts;

	hsfsts = readl(&regs->hsfsts_ctl);
	hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
	hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;

	/* Set up transaction parameters */
	hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
	hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
	hsfsts |= HSFSTS_FGO;

	writel(offset, &regs->faddr);
	writel(hsfsts, &regs->hsfsts_ctl);
}

static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
{
	ulong start;
	u32 hsfsts;

	start = get_timer(0);
	do {
		hsfsts = readl(&regs->hsfsts_ctl);
		if (hsfsts & HSFSTS_FCERR) {
			debug("SPI transaction error at offset %x HSFSTS = %08x\n",
			      offset, hsfsts);
			return -EIO;
		}
		if (hsfsts & HSFSTS_AEL)
			return -EPERM;
		if (hsfsts & HSFSTS_FDONE)
			return 0;
	} while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

	debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
	      offset, hsfsts, (uint)get_timer(start));

	return -ETIMEDOUT;
}

/**
 * exec_sync_hwseq_xfer() - Execute a flash transfer by hardware sequencing
 *
 * This waits until the transfer completes or times out
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *	(AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
				uint offset, uint len)
{
	start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

	return wait_for_hwseq_xfer(regs, offset);
}
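
/*
 * Execute a SPI memory operation using the hardware sequencer, mapping the
 * SPI-NOR opcode onto the corresponding hardware cycle type
 */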
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct fast_spi_regs *regs = priv->base;
	uint page_size;
	uint offset;
	int cycle;
	uint len;
	bool out;
	int ret;
	u8 *buf;

	offset = op->addr.val;
	len = op->data.nbytes;

	switch (op->cmd.opcode) {
	case SPINOR_OP_RDID:
		cycle = HSFSTS_CYCLE_RDID;
		break;
	case SPINOR_OP_READ_FAST:
		cycle = HSFSTS_CYCLE_READ;
		break;
	case SPINOR_OP_PP:
		cycle = HSFSTS_CYCLE_WRITE;
		break;
	case SPINOR_OP_WREN:
		/* Nothing needs to be done */
		return 0;
	case SPINOR_OP_WRSR:
		cycle = HSFSTS_CYCLE_WR_STATUS;
		break;
	case SPINOR_OP_RDSR:
		cycle = HSFSTS_CYCLE_RD_STATUS;
		break;
	case SPINOR_OP_WRDI:
		return 0;  /* ignore */
	case SPINOR_OP_BE_4K:
		cycle = HSFSTS_CYCLE_4K_ERASE;
		ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
		return ret;
	default:
		debug("Unknown cycle %x\n", op->cmd.opcode);
		return -EINVAL;
	}

	out = op->data.dir == SPI_MEM_DATA_OUT;
	buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;

	page_size = flash->page_size ? : 256;
	while (len) {
		uint xfer_len = get_xfer_len(offset, len, page_size);

		if (out)
			fill_xfer_fifo(regs, buf, xfer_len);

		ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
		if (ret)
			return ret;

		if (!out)
			drain_xfer_fifo(regs, buf, xfer_len);

		offset += xfer_len;
		buf += xfer_len;
		len -= xfer_len;
	}

	return 0;
}
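
/* Dispatch a SPI memory operation to the hardware or software sequencer */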
static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	int ret;

	bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
	if (plat->hwseq)
		ret = ich_spi_exec_op_hwseq(slave, op);
	else
		ret = ich_spi_exec_op_swseq(slave, op);
	bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

	return ret;
}
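
/* Find the memory-mapped window for the BIOS region of the SPI flash */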
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
			    uint *map_sizep, uint *offsetp)
{
	pci_dev_t spi_bdf;

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct pci_child_platdata *pplat = dev_get_parent_platdata(bus);

	spi_bdf = pplat->devfn;
#else
	struct ich_spi_platdata *plat = dev_get_platdata(bus);

	/*
	 * We cannot rely on plat->bdf being set up yet since this method can
	 * be called before the device is probed. Use the of-platdata directly
	 * instead.
	 */
	spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

	return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}

static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
			uint *offsetp)
{
	struct udevice *bus = dev_get_parent(dev);

	return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}
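
/* Clip op->data.nbytes so that a transfer does not cross an ICH_BOUNDARY page */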
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	unsigned int page_offset;
	int addr = op->addr.val;
	unsigned int byte_count = op->data.nbytes;

	if (hweight32(ICH_BOUNDARY) == 1) {
		page_offset = addr & (ICH_BOUNDARY - 1);
	} else {
		u64 aux = addr;

		page_offset = do_div(aux, ICH_BOUNDARY);
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (slave->max_read_size) {
			op->data.nbytes = min(ICH_BOUNDARY - page_offset,
					      slave->max_read_size);
		}
	} else if (slave->max_write_size) {
		op->data.nbytes = min(ICH_BOUNDARY - page_offset,
				      slave->max_write_size);
	}

	op->data.nbytes = min(op->data.nbytes, byte_count);

	return 0;
}
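
/*
 * Disable the BIOS write protection and, if requested by the device tree,
 * lock down the opcode configuration
 */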
static int ich_protect_lockdown(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret = -ENOSYS;

	/* Disable the BIOS write protect so write commands are allowed */
	if (priv->pch)
		ret = pch_set_spi_protect(priv->pch, false);
	if (ret == -ENOSYS) {
		u8 bios_cntl;

		bios_cntl = ich_readb(priv, priv->bcr);
		bios_cntl &= ~BIT(5);	/* clear Enable InSMM_STS (EISS) */
		bios_cntl |= 1;		/* Write Protect Disable (WPD) */
		ich_writeb(priv, bios_cntl, priv->bcr);
	} else if (ret) {
		debug("%s: Failed to disable write-protect: err=%d\n",
		      __func__, ret);
		return ret;
	}

	/* Lock down SPI controller settings if required */
	if (plat->lockdown) {
		ich_spi_config_opcode(dev);
		spi_lock_down(plat, priv->base);
	}

	return 0;
}
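
/*
 * Set up the register-layout information for the detected controller
 * generation and work out the maximum supported SPI speed
 */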
static int ich_init_controller(struct udevice *dev,
			       struct ich_spi_platdata *plat,
			       struct ich_spi_priv *ctlr)
{
	if (spl_phase() == PHASE_TPL) {
		struct ich_spi_platdata *plat = dev_get_platdata(dev);
		int ret;

		ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
		if (ret)
			return ret;
	}

	ctlr->base = (void *)plat->mmio_base;
	if (plat->ich_version == ICHV_7) {
		struct ich7_spi_regs *ich7_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich7_spi->opmenu);
		ctlr->optype = offsetof(struct ich7_spi_regs, optype);
		ctlr->addr = offsetof(struct ich7_spi_regs, spia);
		ctlr->data = offsetof(struct ich7_spi_regs, spid);
		ctlr->databytes = sizeof(ich7_spi->spid);
		ctlr->status = offsetof(struct ich7_spi_regs, spis);
		ctlr->control = offsetof(struct ich7_spi_regs, spic);
		ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich7_spi_regs, preop);
	} else if (plat->ich_version == ICHV_9) {
		struct ich9_spi_regs *ich9_spi = ctlr->base;

		ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
		ctlr->menubytes = sizeof(ich9_spi->opmenu);
		ctlr->optype = offsetof(struct ich9_spi_regs, optype);
		ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
		ctlr->data = offsetof(struct ich9_spi_regs, fdata);
		ctlr->databytes = sizeof(ich9_spi->fdata);
		ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
		ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
		ctlr->speed = ctlr->control + 2;
		ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
		ctlr->preop = offsetof(struct ich9_spi_regs, preop);
		ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
		ctlr->pr = &ich9_spi->pr[0];
	} else if (plat->ich_version == ICHV_APL) {
		/* No software-sequencer registers to set up for fast SPI */
	} else {
		debug("ICH SPI: Unrecognised ICH version %d\n",
		      plat->ich_version);
		return -EINVAL;
	}

	/* Work out the maximum speed we can support */
	ctlr->max_speed = 20000000;
	if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
		ctlr->max_speed = 33000000;
	debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
	      plat->ich_version, plat->mmio_base, ctlr->max_speed);

	ich_set_bbar(ctlr, 0);

	return 0;
}

static int ich_cache_bios_region(struct udevice *dev)
{
	ulong map_base;
	uint map_size;
	uint offset;
	ulong base;
	int ret;

	ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
	if (ret)
		return ret;

	/* Don't use WRBACK since we are not supposed to write to SPI flash */
	base = SZ_4G - map_size;
	mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
	log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

	return 0;
}

static int ich_spi_probe(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);
	struct ich_spi_priv *priv = dev_get_priv(dev);
	int ret;

	ret = ich_init_controller(dev, plat, priv);
	if (ret)
		return ret;

	if (spl_phase() == PHASE_TPL) {
		/* Cache the BIOS to speed things up */
		ret = ich_cache_bios_region(dev);
		if (ret)
			return ret;
	} else {
		ret = ich_protect_lockdown(dev);
		if (ret)
			return ret;
	}
	priv->cur_speed = priv->max_speed;

	return 0;
}

static int ich_spi_remove(struct udevice *bus)
{
	/*
	 * Configure SPI controller so that the Linux MTD driver can fully
	 * access the SPI NOR chip
	 */
	ich_spi_config_opcode(bus);

	return 0;
}

static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
	struct ich_spi_priv *priv = dev_get_priv(bus);

	priv->cur_speed = speed;

	return 0;
}

static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
	debug("%s: mode=%d\n", __func__, mode);

	return 0;
}

static int ich_spi_child_pre_probe(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct ich_spi_platdata *plat = dev_get_platdata(bus);
	struct ich_spi_priv *priv = dev_get_priv(bus);
	struct spi_slave *slave = dev_get_parent_priv(dev);

	/*
	 * Yes, this controller can only write a small number of bytes at
	 * once! The limit is typically 64 bytes. For hardware sequencing a
	 * loop is used to get around this.
	 */
	if (!plat->hwseq)
		slave->max_write_size = priv->databytes;

	/*
	 * The ICH 7 SPI controller only supports the array read command
	 * and byte program command for SST flash
	 */
	if (plat->ich_version == ICHV_7)
		slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

	return 0;
}
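
/* Read the controller configuration from the device tree / of-platdata */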
static int ich_spi_ofdata_to_platdata(struct udevice *dev)
{
	struct ich_spi_platdata *plat = dev_get_platdata(dev);

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct ich_spi_priv *priv = dev_get_priv(dev);

	/* Find a PCH if there is one */
	uclass_first_device(UCLASS_PCH, &priv->pch);
	if (!priv->pch)
		priv->pch = dev_get_parent(dev);

	plat->ich_version = dev_get_driver_data(dev);
	plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
	if (plat->ich_version == ICHV_APL) {
		plat->mmio_base = dm_pci_read_bar32(dev, 0);
	} else {
		/* SBASE is similar */
		pch_get_spi_base(priv->pch, &plat->mmio_base);
	}
	/*
	 * Use an int so that the property is present in of-platdata even
	 * when false.
	 */
	plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
	plat->ich_version = ICHV_APL;
	plat->mmio_base = plat->dtplat.early_regs[0];
	plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
	plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
	debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

	return 0;
}

static const struct spi_controller_mem_ops ich_controller_mem_ops = {
	.adjust_op_size	= ich_spi_adjust_size,
	.supports_op	= NULL,
	.exec_op	= ich_spi_exec_op,
};

static const struct dm_spi_ops ich_spi_ops = {
	/* xfer is not supported */
	.set_speed	= ich_spi_set_speed,
	.set_mode	= ich_spi_set_mode,
	.mem_ops	= &ich_controller_mem_ops,
	.get_mmap	= ich_get_mmap,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

static const struct udevice_id ich_spi_ids[] = {
	{ .compatible = "intel,ich7-spi", ICHV_7 },
	{ .compatible = "intel,ich9-spi", ICHV_9 },
	{ .compatible = "intel,fast-spi", ICHV_APL },
	{ }
};

U_BOOT_DRIVER(intel_fast_spi) = {
	.name	= "intel_fast_spi",
	.id	= UCLASS_SPI,
	.of_match = ich_spi_ids,
	.ops	= &ich_spi_ops,
	.ofdata_to_platdata = ich_spi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct ich_spi_platdata),
	.priv_auto_alloc_size = sizeof(struct ich_spi_priv),
	.child_pre_probe = ich_spi_child_pre_probe,
	.probe	= ich_spi_probe,
	.remove	= ich_spi_remove,
	.flags	= DM_FLAG_OS_PREPARE,
};