// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2011-12 The Chromium OS Authors.
 *
 * This file is derived from the flashrom project.
 */

#define LOG_CATEGORY UCLASS_SPI

#include <common.h>
#include <bootstage.h>
#include <div64.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <malloc.h>
#include <pch.h>
#include <pci.h>
#include <pci_ids.h>
#include <spi.h>
#include <spi_flash.h>
#include <spi-mem.h>
#include <spl.h>
#include <asm/fast_spi.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <linux/sizes.h>

#include "ich.h"

#ifdef DEBUG_TRACE
#define debug_trace(fmt, args...) debug(fmt, ##args)
#else
#define debug_trace(x, args...)
#endif

struct ich_spi_platdata {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
        struct dtd_intel_fast_spi dtplat;
#endif
        enum ich_version ich_version;   /* Controller version, 7 or 9 */
        bool lockdown;                  /* lock down controller settings? */
        ulong mmio_base;                /* Base of MMIO registers */
        pci_dev_t bdf;                  /* PCI address used by of-platdata */
        bool hwseq;                     /* Use hardware sequencing (not s/w) */
};

static u8 ich_readb(struct ich_spi_priv *priv, int reg)
{
        u8 value = readb(priv->base + reg);

        debug_trace("read %2.2x from %4.4x\n", value, reg);

        return value;
}

static u16 ich_readw(struct ich_spi_priv *priv, int reg)
{
        u16 value = readw(priv->base + reg);

        debug_trace("read %4.4x from %4.4x\n", value, reg);

        return value;
}

static u32 ich_readl(struct ich_spi_priv *priv, int reg)
{
        u32 value = readl(priv->base + reg);

        debug_trace("read %8.8x from %4.4x\n", value, reg);

        return value;
}

static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
{
        writeb(value, priv->base + reg);
        debug_trace("wrote %2.2x to %4.4x\n", value, reg);
}

static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
{
        writew(value, priv->base + reg);
        debug_trace("wrote %4.4x to %4.4x\n", value, reg);
}

static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
{
        writel(value, priv->base + reg);
        debug_trace("wrote %8.8x to %4.4x\n", value, reg);
}

static void write_reg(struct ich_spi_priv *priv, const void *value,
                      int dest_reg, uint32_t size)
{
        memcpy_toio(priv->base + dest_reg, value, size);
}

static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
                     uint32_t size)
{
        memcpy_fromio(value, priv->base + src_reg, size);
}

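/*
 * ich_set_bbar() - Set the BIOS Base Address Register (BBAR)
 *
 * Program the minimum allowed flash address into the BBAR, masked to the
 * bits the register implements. Does nothing on controllers that have no
 * BBAR (ctlr->bbar is zero).
 */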
static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
{
        const uint32_t bbar_mask = 0x00ffff00;
        uint32_t ichspi_bbar;

        if (ctlr->bbar) {
                minaddr &= bbar_mask;
                ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
                ichspi_bbar |= minaddr;
                ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
        }
}

/* Return true if the SPI flash supports the 33MHz speed */
static bool ich9_can_do_33mhz(struct udevice *dev)
{
        struct ich_spi_priv *priv = dev_get_priv(dev);
        u32 fdod, speed;

        if (!CONFIG_IS_ENABLED(PCI))
                return false;

        /* Observe SPI Descriptor Component Section 0 */
        dm_pci_write_config32(priv->pch, 0xb0, 0x1000);

        /* Extract the Write/Erase SPI Frequency from descriptor */
        dm_pci_read_config32(priv->pch, 0xb4, &fdod);

        /* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
        speed = (fdod >> 21) & 7;

        return speed == 1;
}

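/*
 * spi_lock_down() - Lock down the SPI controller configuration
 *
 * Set the lock bit (SPIS_LOCK on ICH7, HSFS_FLOCKDN on ICH9) so that the
 * opcode/prefix configuration registers can no longer be changed.
 */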
static void spi_lock_down(struct ich_spi_platdata *plat, void *sbase)
{
        if (plat->ich_version == ICHV_7) {
                struct ich7_spi_regs *ich7_spi = sbase;

                setbits_le16(&ich7_spi->spis, SPIS_LOCK);
        } else if (plat->ich_version == ICHV_9) {
                struct ich9_spi_regs *ich9_spi = sbase;

                setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
        }
}

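/*
 * spi_lock_status() - Check whether the controller configuration is locked
 *
 * Return: true if the lock-down bit is set, false otherwise
 */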
static bool spi_lock_status(struct ich_spi_platdata *plat, void *sbase)
{
        int lock = 0;

        if (plat->ich_version == ICHV_7) {
                struct ich7_spi_regs *ich7_spi = sbase;

                lock = readw(&ich7_spi->spis) & SPIS_LOCK;
        } else if (plat->ich_version == ICHV_9) {
                struct ich9_spi_regs *ich9_spi = sbase;

                lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
        }

        return lock != 0;
}

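/*
 * spi_setup_opcode() - Make sure the requested opcode can be issued
 *
 * If the controller is not locked, program the opcode and its type into
 * slot 0 of the opcode menu. If it is locked, search the existing menu for
 * the opcode and check that its configured type matches the transaction.
 *
 * Return: the opcode-menu index to use, -EINVAL if the opcode is not in the
 * menu, or -ENOSPC if its configured type does not match
 */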
static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
                            bool lock)
{
        uint16_t optypes;
        uint8_t opmenu[ctlr->menubytes];

        if (!lock) {
                /* The lock is off, so just use index 0. */
                ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
                optypes = ich_readw(ctlr, ctlr->optype);
                optypes = (optypes & 0xfffc) | (trans->type & 0x3);
                ich_writew(ctlr, optypes, ctlr->optype);
                return 0;
        } else {
                /* The lock is on. See if what we need is on the menu. */
                uint8_t optype;
                uint16_t opcode_index;

                /* Write Enable is handled as atomic prefix */
                if (trans->opcode == SPI_OPCODE_WREN)
                        return 0;

                read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
                for (opcode_index = 0; opcode_index < ctlr->menubytes;
                     opcode_index++) {
                        if (opmenu[opcode_index] == trans->opcode)
                                break;
                }

                if (opcode_index == ctlr->menubytes) {
                        debug("ICH SPI: Opcode %x not found\n", trans->opcode);
                        return -EINVAL;
                }

                optypes = ich_readw(ctlr, ctlr->optype);
                optype = (optypes >> (opcode_index * 2)) & 0x3;

                if (optype != trans->type) {
                        debug("ICH SPI: Transaction doesn't fit type %d\n",
                              optype);
                        return -ENOSPC;
                }
                return opcode_index;
        }
}

/*
 * Wait for up to 6s for the status register bit(s) in bitmask to become 1
 * (if wait_til_set is true) or 0. If the wait was for the bit(s) to become
 * set, write those bits back afterwards, which clears them again (they are
 * write-one-to-clear status bits).
 *
 * Return the last status value read on success or -ETIMEDOUT on timeout.
 */
static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
                           int wait_til_set)
{
        int timeout = 600000; /* This will result in 6s */
        u16 status = 0;

        while (timeout--) {
                status = ich_readw(ctlr, ctlr->status);
                if (wait_til_set ^ ((status & bitmask) == 0)) {
                        if (wait_til_set) {
                                ich_writew(ctlr, status & bitmask,
                                           ctlr->status);
                        }
                        return status;
                }
                udelay(10);
        }
        debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
              status, bitmask, wait_til_set, status & bitmask);

        return -ETIMEDOUT;
}

static void ich_spi_config_opcode(struct udevice *dev)
{
        struct ich_spi_priv *ctlr = dev_get_priv(dev);

        /*
         * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
         * to prevent accidental or intentional writes. Before they get
         * locked down, these registers should be initialized properly.
         */
        ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
        ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
        ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
        ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
}

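/*
 * ich_spi_exec_op_swseq() - Execute a flash operation by software sequencing
 *
 * Drives the legacy opcode/prefix registers directly: the opcode is set up
 * (or looked up in the menu when the controller is locked), Write Enable is
 * turned into an atomic pre-op where possible, and data is moved through the
 * FDATA registers in chunks of at most ctlr->databytes bytes.
 *
 * Return: 0 if OK, -ve on error
 */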
static int ich_spi_exec_op_swseq(struct spi_slave *slave,
                                 const struct spi_mem_op *op)
{
        struct udevice *bus = dev_get_parent(slave->dev);
        struct ich_spi_platdata *plat = dev_get_platdata(bus);
        struct ich_spi_priv *ctlr = dev_get_priv(bus);
        uint16_t control;
        int16_t opcode_index;
        int with_address = 0;
        int status;
        struct spi_trans *trans = &ctlr->trans;
        bool lock = spi_lock_status(plat, ctlr->base);
        int ret = 0;

        trans->in = NULL;
        trans->out = NULL;
        trans->type = 0xFF;

        if (op->data.nbytes) {
                if (op->data.dir == SPI_MEM_DATA_IN) {
                        trans->in = op->data.buf.in;
                        trans->bytesin = op->data.nbytes;
                } else {
                        trans->out = op->data.buf.out;
                        trans->bytesout = op->data.nbytes;
                }
        }

        if (trans->opcode != op->cmd.opcode)
                trans->opcode = op->cmd.opcode;

        if (lock && trans->opcode == SPI_OPCODE_WRDIS)
                return 0;

        if (trans->opcode == SPI_OPCODE_WREN) {
                /*
                 * Treat Write Enable as Atomic Pre-Op if possible
                 * in order to prevent the Management Engine from
                 * issuing a transaction between WREN and DATA.
                 */
                if (!lock)
                        ich_writew(ctlr, trans->opcode, ctlr->preop);
                return 0;
        }

        ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
        if (ret < 0)
                return ret;

        if (plat->ich_version == ICHV_7)
                ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
        else
                ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);

        /* Try to guess spi transaction type */
        if (op->data.dir == SPI_MEM_DATA_OUT) {
                if (op->addr.nbytes)
                        trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
                else
                        trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
        } else {
                if (op->addr.nbytes)
                        trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
                else
                        trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
        }
        /* Special erase case handling */
        if (op->addr.nbytes && !op->data.buswidth)
                trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;

        opcode_index = spi_setup_opcode(ctlr, trans, lock);
        if (opcode_index < 0)
                return -EINVAL;

        if (op->addr.nbytes) {
                trans->offset = op->addr.val;
                with_address = 1;
        }

        if (ctlr->speed && ctlr->max_speed >= 33000000) {
                int byte;

                byte = ich_readb(ctlr, ctlr->speed);
                if (ctlr->cur_speed >= 33000000)
                        byte |= SSFC_SCF_33MHZ;
                else
                        byte &= ~SSFC_SCF_33MHZ;
                ich_writeb(ctlr, byte, ctlr->speed);
        }

        /* Preset control fields */
        control = SPIC_SCGO | ((opcode_index & 0x07) << 4);

        /* Issue atomic preop cycle if needed */
        if (ich_readw(ctlr, ctlr->preop))
                control |= SPIC_ACS;

        if (!trans->bytesout && !trans->bytesin) {
                /* SPI addresses are 24 bit only */
                if (with_address) {
                        ich_writel(ctlr, trans->offset & 0x00FFFFFF,
                                   ctlr->addr);
                }

                /*
                 * This is a 'no data' command (like Write Enable), its
                 * bytesout size was 1, decremented to zero while executing
                 * spi_setup_opcode() above. Tell the chip to send the
                 * command.
                 */
                ich_writew(ctlr, control, ctlr->control);

                /* wait for the result */
                status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
                if (status < 0)
                        return status;
                if (status & SPIS_FCERR) {
                        debug("ICH SPI: Command transaction error\n");
                        return -EIO;
                }

                return 0;
        }

        while (trans->bytesout || trans->bytesin) {
                uint32_t data_length;

                /* SPI addresses are 24 bit only */
                ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);

                if (trans->bytesout)
                        data_length = min(trans->bytesout, ctlr->databytes);
                else
                        data_length = min(trans->bytesin, ctlr->databytes);

                /* Program data into FDATA0 to N */
                if (trans->bytesout) {
                        write_reg(ctlr, trans->out, ctlr->data, data_length);
                        trans->bytesout -= data_length;
                }

                /* Add proper control fields' values */
                control &= ~((ctlr->databytes - 1) << 8);
                control |= SPIC_DS;
                control |= (data_length - 1) << 8;

                /* write it */
                ich_writew(ctlr, control, ctlr->control);

                /* Wait for Cycle Done Status or Flash Cycle Error */
                status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
                if (status < 0)
                        return status;

                if (status & SPIS_FCERR) {
                        debug("ICH SPI: Data transaction error %x\n", status);
                        return -EIO;
                }

                if (trans->bytesin) {
                        read_reg(ctlr, ctlr->data, trans->in, data_length);
                        trans->bytesin -= data_length;
                }
        }

        /* Clear atomic preop now that xfer is done */
        if (!lock)
                ich_writew(ctlr, 0, ctlr->preop);

        return 0;
}

/*
 * Ensure the read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE
 * and that the operation does not cross a page boundary.
 */
static uint get_xfer_len(u32 offset, int len, int page_size)
{
        uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
        uint bytes_left = ALIGN(offset, page_size) - offset;

        if (bytes_left)
                xfer_len = min(xfer_len, bytes_left);

        return xfer_len;
}

/* Fill FDATAn FIFO in preparation for a write transaction */
static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
                           uint len)
{
        memcpy(regs->fdata, data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data */
static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
{
        memcpy(dest, regs->fdata, len);
}

/* Fire up a transfer using the hardware sequencer */
static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
                             uint offset, uint len)
{
        /* Make sure all W1C status bits get cleared */
        u32 hsfsts;

        hsfsts = readl(&regs->hsfsts_ctl);
        hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
        hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;

        /* Set up transaction parameters */
        hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
        hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
        hsfsts |= HSFSTS_FGO;

        writel(offset, &regs->faddr);
        writel(hsfsts, &regs->hsfsts_ctl);
}

static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
{
        ulong start;
        u32 hsfsts;

        start = get_timer(0);
        do {
                hsfsts = readl(&regs->hsfsts_ctl);
                if (hsfsts & HSFSTS_FCERR) {
                        debug("SPI transaction error at offset %x HSFSTS = %08x\n",
                              offset, hsfsts);
                        return -EIO;
                }
                if (hsfsts & HSFSTS_AEL)
                        return -EPERM;
                if (hsfsts & HSFSTS_FDONE)
                        return 0;
        } while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

        debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
              offset, hsfsts, (uint)get_timer(start));

        return -ETIMEDOUT;
}

/**
 * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
 *
 * This waits until complete or timeout
 *
 * @regs: SPI registers
 * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
 * @offset: Offset to access
 * @len: Number of bytes to transfer (can be 0)
 * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
 *      (AEL), -ETIMEDOUT on timeout
 */
static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
                                uint offset, uint len)
{
        start_hwseq_xfer(regs, hsfsts_cycle, offset, len);

        return wait_for_hwseq_xfer(regs, offset);
}

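/*
 * ich_spi_exec_op_hwseq() - Execute a flash operation by hardware sequencing
 *
 * Maps the SPI-NOR opcode onto one of the fixed cycle types the hardware
 * sequencer understands and runs the transfer through the FDATA FIFO,
 * splitting it so that no chunk exceeds the FIFO size or crosses a flash
 * page boundary. WREN and WRDI are accepted but treated as no-ops.
 *
 * Return: 0 if OK, -ve on error
 */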
static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
                                 const struct spi_mem_op *op)
{
        struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
        struct udevice *bus = dev_get_parent(slave->dev);
        struct ich_spi_priv *priv = dev_get_priv(bus);
        struct fast_spi_regs *regs = priv->base;
        uint page_size;
        uint offset;
        int cycle;
        uint len;
        bool out;
        int ret;
        u8 *buf;

        offset = op->addr.val;
        len = op->data.nbytes;

        switch (op->cmd.opcode) {
        case SPINOR_OP_RDID:
                cycle = HSFSTS_CYCLE_RDID;
                break;
        case SPINOR_OP_READ_FAST:
                cycle = HSFSTS_CYCLE_READ;
                break;
        case SPINOR_OP_PP:
                cycle = HSFSTS_CYCLE_WRITE;
                break;
        case SPINOR_OP_WREN:
                /* Nothing needs to be done */
                return 0;
        case SPINOR_OP_WRSR:
                cycle = HSFSTS_CYCLE_WR_STATUS;
                break;
        case SPINOR_OP_RDSR:
                cycle = HSFSTS_CYCLE_RD_STATUS;
                break;
        case SPINOR_OP_WRDI:
                return 0;  /* ignore */
        case SPINOR_OP_BE_4K:
                cycle = HSFSTS_CYCLE_4K_ERASE;
                ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
                return ret;
        default:
                debug("Unknown cycle %x\n", op->cmd.opcode);
                return -EINVAL;
        }

        out = op->data.dir == SPI_MEM_DATA_OUT;
        buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;

        page_size = flash->page_size ? : 256;
        while (len) {
                uint xfer_len = get_xfer_len(offset, len, page_size);

                if (out)
                        fill_xfer_fifo(regs, buf, xfer_len);

                ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
                if (ret)
                        return ret;

                if (!out)
                        drain_xfer_fifo(regs, buf, xfer_len);

                offset += xfer_len;
                buf += xfer_len;
                len -= xfer_len;
        }

        return 0;
}

static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
        struct udevice *bus = dev_get_parent(slave->dev);
        struct ich_spi_platdata *plat = dev_get_platdata(bus);
        int ret;

        bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
        if (plat->hwseq)
                ret = ich_spi_exec_op_hwseq(slave, op);
        else
                ret = ich_spi_exec_op_swseq(slave, op);
        bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);

        return ret;
}

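/*
 * ich_get_mmap_bus() - Get the memory-mapped view of the SPI flash
 *
 * Looks up the controller's PCI devfn (from the parent platdata, or from
 * of-platdata when OF_PLATDATA is enabled, since this can be called before
 * the device is probed) and asks the fast-SPI layer for the BIOS mapping.
 *
 * Return: 0 if OK, -ve on error
 */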
static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
                            uint *map_sizep, uint *offsetp)
{
        pci_dev_t spi_bdf;

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
        struct pci_child_platdata *pplat = dev_get_parent_platdata(bus);

        spi_bdf = pplat->devfn;
#else
        struct ich_spi_platdata *plat = dev_get_platdata(bus);

        /*
         * We cannot rely on plat->bdf being set up yet since this method can
         * be called before the device is probed. Use the of-platdata directly
         * instead.
         */
        spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
#endif

        return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
}

static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
                        uint *offsetp)
{
        struct udevice *bus = dev_get_parent(dev);

        return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
}

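/*
 * ich_spi_adjust_size() - Limit an operation to what the controller can do
 *
 * Clamps op->data.nbytes so that a single transfer does not cross an
 * ICH_BOUNDARY boundary and does not exceed the slave's maximum read or
 * write size.
 */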
static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
{
        unsigned int page_offset;
        int addr = op->addr.val;
        unsigned int byte_count = op->data.nbytes;

        if (hweight32(ICH_BOUNDARY) == 1) {
                page_offset = addr & (ICH_BOUNDARY - 1);
        } else {
                u64 aux = addr;

                page_offset = do_div(aux, ICH_BOUNDARY);
        }

        if (op->data.dir == SPI_MEM_DATA_IN) {
                if (slave->max_read_size) {
                        op->data.nbytes = min(ICH_BOUNDARY - page_offset,
                                              slave->max_read_size);
                }
        } else if (slave->max_write_size) {
                op->data.nbytes = min(ICH_BOUNDARY - page_offset,
                                      slave->max_write_size);
        }

        op->data.nbytes = min(op->data.nbytes, byte_count);

        return 0;
}

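/*
 * ich_protect_lockdown() - Disable BIOS write protection, lock if requested
 *
 * Disables the BIOS write protection, preferably via the PCH driver, falling
 * back to clearing EISS and setting WPD in the BIOS control register. If
 * lock-down was requested (plat->lockdown), the opcode registers are then
 * configured and locked.
 *
 * Return: 0 if OK, -ve on error
 */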
static int ich_protect_lockdown(struct udevice *dev)
{
        struct ich_spi_platdata *plat = dev_get_platdata(dev);
        struct ich_spi_priv *priv = dev_get_priv(dev);
        int ret = -ENOSYS;

        /* Disable the BIOS write protect so write commands are allowed */
        if (priv->pch)
                ret = pch_set_spi_protect(priv->pch, false);
        if (ret == -ENOSYS) {
                u8 bios_cntl;

                bios_cntl = ich_readb(priv, priv->bcr);
                bios_cntl &= ~BIT(5);   /* clear Enable InSMM_STS (EISS) */
                bios_cntl |= 1;         /* Write Protect Disable (WPD) */
                ich_writeb(priv, bios_cntl, priv->bcr);
        } else if (ret) {
                debug("%s: Failed to disable write-protect: err=%d\n",
                      __func__, ret);
                return ret;
        }

        /* Lock down SPI controller settings if required */
        if (plat->lockdown) {
                ich_spi_config_opcode(dev);
                spi_lock_down(plat, priv->base);
        }

        return 0;
}

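/*
 * ich_init_controller() - Set up the register layout for this controller
 *
 * In TPL this also does the early fast-SPI init so the MMIO registers are
 * usable. The register offsets (opcode menu, address, data FIFO, status,
 * control, etc.) are filled in according to the ICH version (the ICHV_APL
 * case needs no offsets here), and the maximum supported speed is worked
 * out: 33MHz if the ICH9 descriptor allows it, otherwise 20MHz.
 *
 * Return: 0 if OK, -EINVAL for an unrecognised ICH version
 */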
static int ich_init_controller(struct udevice *dev,
                               struct ich_spi_platdata *plat,
                               struct ich_spi_priv *ctlr)
{
        if (spl_phase() == PHASE_TPL) {
                struct ich_spi_platdata *plat = dev_get_platdata(dev);
                int ret;

                ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
                if (ret)
                        return ret;
        }

        ctlr->base = (void *)plat->mmio_base;
        if (plat->ich_version == ICHV_7) {
                struct ich7_spi_regs *ich7_spi = ctlr->base;

                ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
                ctlr->menubytes = sizeof(ich7_spi->opmenu);
                ctlr->optype = offsetof(struct ich7_spi_regs, optype);
                ctlr->addr = offsetof(struct ich7_spi_regs, spia);
                ctlr->data = offsetof(struct ich7_spi_regs, spid);
                ctlr->databytes = sizeof(ich7_spi->spid);
                ctlr->status = offsetof(struct ich7_spi_regs, spis);
                ctlr->control = offsetof(struct ich7_spi_regs, spic);
                ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
                ctlr->preop = offsetof(struct ich7_spi_regs, preop);
        } else if (plat->ich_version == ICHV_9) {
                struct ich9_spi_regs *ich9_spi = ctlr->base;

                ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
                ctlr->menubytes = sizeof(ich9_spi->opmenu);
                ctlr->optype = offsetof(struct ich9_spi_regs, optype);
                ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
                ctlr->data = offsetof(struct ich9_spi_regs, fdata);
                ctlr->databytes = sizeof(ich9_spi->fdata);
                ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
                ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
                ctlr->speed = ctlr->control + 2;
                ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
                ctlr->preop = offsetof(struct ich9_spi_regs, preop);
                ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
                ctlr->pr = &ich9_spi->pr[0];
        } else if (plat->ich_version == ICHV_APL) {
        } else {
                debug("ICH SPI: Unrecognised ICH version %d\n",
                      plat->ich_version);
                return -EINVAL;
        }

        /* Work out the maximum speed we can support */
        ctlr->max_speed = 20000000;
        if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
                ctlr->max_speed = 33000000;
        debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
              plat->ich_version, plat->mmio_base, ctlr->max_speed);

        ich_set_bbar(ctlr, 0);

        return 0;
}

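/*
 * ich_cache_bios_region() - Cache the memory-mapped BIOS region with an MTRR
 *
 * Covers the mapped SPI flash just below 4GB with a write-protect MTRR (not
 * write-back, since we are not supposed to write to the SPI flash through
 * this mapping) to speed up reads.
 */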
static int ich_cache_bios_region(struct udevice *dev)
{
        ulong map_base;
        uint map_size;
        uint offset;
        ulong base;
        int ret;

        ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
        if (ret)
                return ret;

        /* Don't use WRBACK since we are not supposed to write to SPI flash */
        base = SZ_4G - map_size;
        mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
        log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);

        return 0;
}

static int ich_spi_probe(struct udevice *dev)
{
        struct ich_spi_platdata *plat = dev_get_platdata(dev);
        struct ich_spi_priv *priv = dev_get_priv(dev);
        int ret;

        ret = ich_init_controller(dev, plat, priv);
        if (ret)
                return ret;

        if (spl_phase() == PHASE_TPL) {
                /* Cache the BIOS to speed things up */
                ret = ich_cache_bios_region(dev);
                if (ret)
                        return ret;
        } else {
                ret = ich_protect_lockdown(dev);
                if (ret)
                        return ret;
        }
        priv->cur_speed = priv->max_speed;

        return 0;
}

static int ich_spi_remove(struct udevice *bus)
{
        /*
         * Configure SPI controller so that the Linux MTD driver can fully
         * access the SPI NOR chip
         */
        ich_spi_config_opcode(bus);

        return 0;
}

static int ich_spi_set_speed(struct udevice *bus, uint speed)
{
        struct ich_spi_priv *priv = dev_get_priv(bus);

        priv->cur_speed = speed;

        return 0;
}

static int ich_spi_set_mode(struct udevice *bus, uint mode)
{
        debug("%s: mode=%d\n", __func__, mode);

        return 0;
}

static int ich_spi_child_pre_probe(struct udevice *dev)
{
        struct udevice *bus = dev_get_parent(dev);
        struct ich_spi_platdata *plat = dev_get_platdata(bus);
        struct ich_spi_priv *priv = dev_get_priv(bus);
        struct spi_slave *slave = dev_get_parent_priv(dev);

        /*
         * Yes, this controller can only write a small number of bytes at
         * once! The limit is typically 64 bytes. For hardware sequencing a
         * loop is used to get around this.
         */
        if (!plat->hwseq)
                slave->max_write_size = priv->databytes;

        /*
         * ICH 7 SPI controller only supports array read command
         * and byte program command for SST flash
         */
        if (plat->ich_version == ICHV_7)
                slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;

        return 0;
}

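/*
 * ich_spi_ofdata_to_platdata() - Collect platform data from the device tree
 *
 * Records the ICH version (from the driver data), the lock-down and
 * hardware-sequencing properties and the MMIO base (from BAR0 on APL,
 * otherwise from the PCH). With OF_PLATDATA the same information comes from
 * the generated dtplat structure instead.
 */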
static int ich_spi_ofdata_to_platdata(struct udevice *dev)
{
        struct ich_spi_platdata *plat = dev_get_platdata(dev);

#if !CONFIG_IS_ENABLED(OF_PLATDATA)
        struct ich_spi_priv *priv = dev_get_priv(dev);

        /* Find a PCH if there is one */
        uclass_first_device(UCLASS_PCH, &priv->pch);
        if (!priv->pch)
                priv->pch = dev_get_parent(dev);

        plat->ich_version = dev_get_driver_data(dev);
        plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
        if (plat->ich_version == ICHV_APL) {
                plat->mmio_base = dm_pci_read_bar32(dev, 0);
        } else {
                /* SBASE is similar */
                pch_get_spi_base(priv->pch, &plat->mmio_base);
        }
        /*
         * Use an int so that the property is present in of-platdata even
         * when false.
         */
        plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
#else
        plat->ich_version = ICHV_APL;
        plat->mmio_base = plat->dtplat.early_regs[0];
        plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
        plat->hwseq = plat->dtplat.intel_hardware_seq;
#endif
        debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);

        return 0;
}

static const struct spi_controller_mem_ops ich_controller_mem_ops = {
        .adjust_op_size = ich_spi_adjust_size,
        .supports_op    = NULL,
        .exec_op        = ich_spi_exec_op,
};

static const struct dm_spi_ops ich_spi_ops = {
        /* xfer is not supported */
        .set_speed      = ich_spi_set_speed,
        .set_mode       = ich_spi_set_mode,
        .mem_ops        = &ich_controller_mem_ops,
        .get_mmap       = ich_get_mmap,
        /*
         * cs_info is not needed, since we require all chip selects to be
         * in the device tree explicitly
         */
};

static const struct udevice_id ich_spi_ids[] = {
        { .compatible = "intel,ich7-spi", ICHV_7 },
        { .compatible = "intel,ich9-spi", ICHV_9 },
        { .compatible = "intel,fast-spi", ICHV_APL },
        { }
};

U_BOOT_DRIVER(intel_fast_spi) = {
        .name   = "intel_fast_spi",
        .id     = UCLASS_SPI,
        .of_match = ich_spi_ids,
        .ops    = &ich_spi_ops,
        .ofdata_to_platdata = ich_spi_ofdata_to_platdata,
        .platdata_auto_alloc_size = sizeof(struct ich_spi_platdata),
        .priv_auto_alloc_size = sizeof(struct ich_spi_priv),
        .child_pre_probe = ich_spi_child_pre_probe,
        .probe  = ich_spi_probe,
        .remove = ich_spi_remove,
        .flags  = DM_FLAG_OS_PREPARE,
};