core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <dm/devres.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;
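
/*
 * Fold the plane number into the column address used by the cache
 * read/write operations; a no-op for devices with a single plane per LUN.
 */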
static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}
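
/*
 * Feature register accessors: GET FEATURE/SET FEATURE cycles that go through
 * the DMA-safe scratch buffer allocated in spinand_init().
 */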
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
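
/*
 * Typical usage is updating a single feature bit while leaving the rest of
 * the register untouched, e.g.
 *
 *	spinand_upd_cfg(spinand, CFG_ECC_ENABLE, enable ? CFG_ECC_ENABLE : 0);
 *
 * as done by spinand_ecc_enable() below.
 */
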
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}
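
/*
 * Read the configuration register of each die once and cache it, so that
 * spinand_get_cfg()/spinand_set_cfg() can avoid redundant bus accesses.
 */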
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}
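
/*
 * Set the Quad Enable (QE) bit only on chips that have one
 * (SPINAND_HAS_QE_BIT) and only when at least one of the selected cache op
 * templates actually uses a 4-bit data bus.
 */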
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
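
/*
 * Transfer the page cache into the driver buffers. The full page (and OOB
 * area, when requested) is always read, then the portion asked for by @req
 * is copied back into the caller's buffers.
 */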
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
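
/*
 * Fill the on-chip cache with the data to program. The internal page buffer
 * is pre-filled with 0xff so that bytes not covered by @req are left
 * unprogrammed.
 */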
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
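
/*
 * Poll the status register until the BUSY bit clears or the 400 ms timeout
 * expires, optionally returning the last status value through @s.
 */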
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
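
/*
 * Translate the ECC status bits into the MTD convention: the number of
 * corrected bitflips on success, -EBADMSG on an uncorrectable error. Chips
 * with a more precise status report it through eccinfo.get_status().
 */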
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}
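
/*
 * Read one page: load it into the on-chip cache, wait for the chip to become
 * ready, read the cache out and, when ECC is enabled, decode the resulting
 * status.
 */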
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}
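
/*
 * MTD ->_read_oob()/->_write_oob() entry points: iterate over the pages
 * covered by the request and select the right die and ECC mode for each one.
 */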
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
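
/*
 * A block is considered bad when the first two OOB bytes of its first page
 * are not both 0xff (raw read, ECC disabled).
 */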
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = 2,
		.ooboffs = 0,
		.oobbuf.in = spinand->oobbuf,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	memset(spinand->oobbuf, 0, 2);
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_read_page(spinand, &req, false);
	if (ret)
		return ret;

	if (spinand->oobbuf[0] != 0xff || spinand->oobbuf[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
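
/*
 * Mark a block bad by erasing it and then programming 0x00 into the first
 * two OOB bytes of its first page.
 */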
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = 2,
		.oobbuf.out = spinand->oobbuf,
	};
	int ret;

	/* Erase block before marking it bad. */
	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	memset(spinand->oobbuf, 0, 2);
	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
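
/* Erase a single block and report -EIO if the chip flags an erase failure. */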
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
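
/*
 * Return the first operation in @ops that the underlying controller
 * supports, or NULL if none of them can be executed.
 */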
const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&winbond_spinand_manufacturer,
};
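
/*
 * Let each registered manufacturer driver try to match the ID bytes read
 * from the chip: a positive return value means "detected", a negative one
 * aborts the detection.
 */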
static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
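
/*
 * Pick the first op variant the controller can execute for a full page + OOB
 * transfer, possibly split into several chunks by spi_mem_adjust_op_size().
 */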
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID retrieved through the READ_ID command
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}
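
/*
 * Reset the chip, read its ID bytes and let the manufacturer drivers
 * identify it.
 */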
static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};
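
/*
 * One-time initialization: detect the chip, allocate the DMA-safe data and
 * scratch buffers, configure the ECC/QE/OTP bits, unlock all blocks and hook
 * up the MTD callbacks.
 */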
static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so make the whole OOB
	 * area available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}
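
/*
 * Driver model probe: bind the spinand private data to the SPI slave and the
 * MTD uclass device, then initialize the chip and register the MTD device.
 */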
static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};