// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
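
/*
 * On multi-plane devices the cache read/write commands expect the plane
 * number in the bit just above the column address. Adjust the column
 * accordingly; single-plane LUNs are left untouched.
 */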
static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}
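
/*
 * Feature registers (status, configuration, block lock) are accessed with
 * GET/SET FEATURE commands. Transfers go through spinand->scratchbuf because
 * buffers passed to spi-mem must be DMA-able.
 */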
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}
/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}
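
/*
 * Read the configuration register of every die once at init time so that
 * later spinand_get_cfg()/spinand_set_cfg() calls can avoid redundant
 * GET FEATURE transfers.
 */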
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
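
/*
 * Transfer the content of the on-chip page cache into the in-memory bounce
 * buffers (spinand->databuf/oobbuf), then copy the requested data/OOB ranges
 * into the caller's buffers.
 */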
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
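
/*
 * Fill the bounce buffers with 0xff (so untouched bytes are not programmed),
 * copy the caller's data/OOB into them, and load the result into the chip's
 * page cache, splitting the transfer when the controller limits TX sizes.
 */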
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
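
/*
 * Poll the status register until the chip deasserts its busy bit or the
 * 400 ms timeout expires. The last status value read is returned through @s
 * when the caller provides it.
 */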
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
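
/*
 * Translate the ECC bits of the status register into the MTD convention:
 * the number of corrected bitflips on success, -EBADMSG on an uncorrectable
 * error. Chips with a finer-grained report provide their own get_status()
 * hook.
 */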
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
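
/*
 * Single-page helpers: load the page into the chip cache (or program it from
 * the cache), wait for completion and report the ECC/program status.
 */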
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
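
/*
 * A block is reported bad when the two bad-block marker bytes at the start
 * of the OOB area of its first page are not both 0xff. The markers are read
 * in raw mode and the ECC status is ignored.
 */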
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;

	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
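
/*
 * Pick the first operation variant (e.g. quad, dual or single I/O) that the
 * SPI controller can execute for a full page + OOB transfer, possibly split
 * into several chunks by spi_mem_adjust_op_size().
 */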
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}
/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID read from the chip, to match against the table entries
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
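
/*
 * Reset the chip, read its ID and let the manufacturer drivers identify it
 * and fill in the memory organization and operation templates.
 */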
static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.free = spinand_noecc_ooblayout_free,
};
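
/*
 * One-time initialization: detect the chip, allocate the DMA-able bounce
 * buffers, configure the quad/ECC/OTP bits, unlock all blocks and set up
 * the MTD callbacks and OOB layout.
 */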
static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole OOB area be
	 * available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}
static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};