// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;

static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
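	/*
	 * Worked example: with a 2 KiB page, fls(2048) = 12, so the plane
	 * index lands at bit 12, just above the 12-bit column address.
	 */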
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If chip only has one die, this function is a NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;
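
	/*
	 * Enable the QE bit only when one of the selected read/write/update
	 * cache op variants actually transfers data on four I/O lines.
	 */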
	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;
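
	/*
	 * The full page (and/or the full OOB area) is always read into the
	 * bounce buffers; the sub-range the caller asked for is copied out
	 * once the transfer is done.
	 */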
	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;
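
	/*
	 * Pre-fill the bounce buffer with 0xff so that any bytes the caller
	 * did not provide are programmed as all-ones and leave the flash
	 * content untouched.
	 */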
	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
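
	/*
	 * Poll the status register until the BUSY bit clears or roughly
	 * 400 ms have elapsed (get_timer() counts in milliseconds).
	 */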
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the BUSY bit has cleared
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;
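
	/*
	 * The block is reported bad when either of the first two OOB bytes
	 * (the factory bad-block markers) is not 0xff.
	 */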
	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}

static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);
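
		/*
		 * A variant is usable only if the controller can transfer a
		 * full page + OOB with it, possibly split into several chunks
		 * by spi_mem_adjust_op_size().
		 */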
		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};

static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so make the whole OOB
	 * area available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);

	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_ofnode(spinand, dev_ofnode(dev));
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto = sizeof(struct spinand_device),
	.probe = spinand_probe,
};