// SPDX-License-Identifier: GPL-2.0+
/*
 * Arasan NAND Flash Controller Driver
 *
 * Copyright (C) 2014 - 2015 Xilinx, Inc.
 */

#include <common.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <nand.h>

struct arasan_nand_info {
	void __iomem *nand_base;
	u32 page;
	bool on_die_ecc_enabled;
};

struct nand_regs {
	u32 pkt_reg;
	u32 memadr_reg1;
	u32 memadr_reg2;
	u32 cmd_reg;
	u32 pgm_reg;
	u32 intsts_enr;
	u32 intsig_enr;
	u32 intsts_reg;
	u32 rdy_busy;
	u32 cms_sysadr_reg;
	u32 flash_sts_reg;
	u32 tmg_reg;
	u32 buf_dataport;
	u32 ecc_reg;
	u32 ecc_errcnt_reg;
	u32 ecc_sprcmd_reg;
	u32 errcnt_1bitreg;
	u32 errcnt_2bitreg;
	u32 errcnt_3bitreg;
	u32 errcnt_4bitreg;
	u32 dma_sysadr0_reg;
	u32 dma_bufbdry_reg;
	u32 cpu_rls_reg;
	u32 errcnt_5bitreg;
	u32 errcnt_6bitreg;
	u32 errcnt_7bitreg;
	u32 errcnt_8bitreg;
	u32 data_if_reg;
};

#define arasan_nand_base ((struct nand_regs __iomem *)ARASAN_NAND_BASEADDR)

struct arasan_nand_command_format {
	u8 cmd1;
	u8 cmd2;
	u8 addr_cycles;
	u32 pgm;
};

#define ONDIE_ECC_FEATURE_ADDR 0x90
#define ENABLE_ONDIE_ECC 0x08

#define ARASAN_PROG_RD_MASK 0x00000001
#define ARASAN_PROG_BLK_ERS_MASK 0x00000004
#define ARASAN_PROG_RD_ID_MASK 0x00000040
#define ARASAN_PROG_RD_STS_MASK 0x00000008
#define ARASAN_PROG_PG_PROG_MASK 0x00000010
#define ARASAN_PROG_RD_PARAM_PG_MASK 0x00000080
#define ARASAN_PROG_RST_MASK 0x00000100
#define ARASAN_PROG_GET_FTRS_MASK 0x00000200
#define ARASAN_PROG_SET_FTRS_MASK 0x00000400
#define ARASAN_PROG_CHNG_ROWADR_END_MASK 0x00400000

#define ARASAN_NAND_CMD_ECC_ON_MASK 0x80000000
#define ARASAN_NAND_CMD_CMD12_MASK 0xFFFF
#define ARASAN_NAND_CMD_PG_SIZE_MASK 0x3800000
#define ARASAN_NAND_CMD_PG_SIZE_SHIFT 23
#define ARASAN_NAND_CMD_CMD2_SHIFT 8
#define ARASAN_NAND_CMD_ADDR_CYCL_MASK 0x70000000
#define ARASAN_NAND_CMD_ADDR_CYCL_SHIFT 28

#define ARASAN_NAND_MEM_ADDR1_PAGE_MASK 0xFFFF0000
#define ARASAN_NAND_MEM_ADDR1_COL_MASK 0xFFFF
#define ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT 16
#define ARASAN_NAND_MEM_ADDR2_PAGE_MASK 0xFF
#define ARASAN_NAND_MEM_ADDR2_CS_MASK 0xC0000000
#define ARASAN_NAND_MEM_ADDR2_BCH_MASK 0xE000000
#define ARASAN_NAND_MEM_ADDR2_BCH_SHIFT 25

#define ARASAN_NAND_INT_STS_ERR_EN_MASK 0x10
#define ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK 0x08
#define ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK 0x02
#define ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK 0x01
#define ARASAN_NAND_INT_STS_XFR_CMPLT_MASK 0x04

#define ARASAN_NAND_PKT_REG_PKT_CNT_MASK 0xFFF000
#define ARASAN_NAND_PKT_REG_PKT_SIZE_MASK 0x7FF
#define ARASAN_NAND_PKT_REG_PKT_CNT_SHFT 12

#define ARASAN_NAND_ROW_ADDR_CYCL_MASK 0x0F
#define ARASAN_NAND_COL_ADDR_CYCL_MASK 0xF0
#define ARASAN_NAND_COL_ADDR_CYCL_SHIFT 4

#define ARASAN_NAND_ECC_SIZE_SHIFT 16
#define ARASAN_NAND_ECC_BCH_SHIFT 27

#define ARASAN_NAND_PKTSIZE_1K 1024
#define ARASAN_NAND_PKTSIZE_512 512

#define ARASAN_NAND_POLL_TIMEOUT 1000000
#define ARASAN_NAND_INVALID_ADDR_CYCL 0xFF

#define ERR_ADDR_CYCLE -1
#define READ_BUFF_SIZE 0x4000

static struct arasan_nand_command_format *curr_cmd;

enum addr_cycles {
	NAND_ADDR_CYCL_NONE,
	NAND_ADDR_CYCL_ONE,
	NAND_ADDR_CYCL_ROW,
	NAND_ADDR_CYCL_COL,
	NAND_ADDR_CYCL_BOTH,
};

static struct arasan_nand_command_format arasan_nand_commands[] = {
	{NAND_CMD_READ0, NAND_CMD_READSTART, NAND_ADDR_CYCL_BOTH,
	 ARASAN_PROG_RD_MASK},
	{NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART, NAND_ADDR_CYCL_COL,
	 ARASAN_PROG_RD_MASK},
	{NAND_CMD_READID, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
	 ARASAN_PROG_RD_ID_MASK},
	{NAND_CMD_STATUS, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE,
	 ARASAN_PROG_RD_STS_MASK},
	{NAND_CMD_SEQIN, NAND_CMD_PAGEPROG, NAND_ADDR_CYCL_BOTH,
	 ARASAN_PROG_PG_PROG_MASK},
	{NAND_CMD_RNDIN, NAND_CMD_NONE, NAND_ADDR_CYCL_COL,
	 ARASAN_PROG_CHNG_ROWADR_END_MASK},
	{NAND_CMD_ERASE1, NAND_CMD_ERASE2, NAND_ADDR_CYCL_ROW,
	 ARASAN_PROG_BLK_ERS_MASK},
	{NAND_CMD_RESET, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE,
	 ARASAN_PROG_RST_MASK},
	{NAND_CMD_PARAM, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
	 ARASAN_PROG_RD_PARAM_PG_MASK},
	{NAND_CMD_GET_FEATURES, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
	 ARASAN_PROG_GET_FTRS_MASK},
	{NAND_CMD_SET_FEATURES, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
	 ARASAN_PROG_SET_FTRS_MASK},
	{NAND_CMD_NONE, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE, 0},
};

struct arasan_ecc_matrix {
	u32 pagesize;
	u32 ecc_codeword_size;
	u8 eccbits;
	u8 bch;
	u8 bchval;
	u16 eccaddr;
	u16 eccsize;
};

static const struct arasan_ecc_matrix ecc_matrix[] = {
	{512, 512, 1, 0, 0, 0x20D, 0x3},
	{512, 512, 4, 1, 3, 0x209, 0x7},
	{512, 512, 8, 1, 2, 0x203, 0xD},
	/*
	 * 2K byte page
	 */
	{2048, 512, 1, 0, 0, 0x834, 0xC},
	{2048, 512, 4, 1, 3, 0x826, 0x1A},
	{2048, 512, 8, 1, 2, 0x80c, 0x34},
	{2048, 512, 12, 1, 1, 0x822, 0x4E},
	{2048, 512, 16, 1, 0, 0x808, 0x68},
	{2048, 1024, 24, 1, 4, 0x81c, 0x54},
	/*
	 * 4K byte page
	 */
	{4096, 512, 1, 0, 0, 0x1068, 0x18},
	{4096, 512, 4, 1, 3, 0x104c, 0x34},
	{4096, 512, 8, 1, 2, 0x1018, 0x68},
	{4096, 512, 12, 1, 1, 0x1044, 0x9C},
	{4096, 512, 16, 1, 0, 0x1010, 0xD0},
	{4096, 1024, 24, 1, 4, 0x1038, 0xA8},
	/*
	 * 8K byte page
	 */
	{8192, 512, 1, 0, 0, 0x20d0, 0x30},
	{8192, 512, 4, 1, 3, 0x2098, 0x68},
	{8192, 512, 8, 1, 2, 0x2030, 0xD0},
	{8192, 512, 12, 1, 1, 0x2088, 0x138},
	{8192, 512, 16, 1, 0, 0x2020, 0x1A0},
	{8192, 1024, 24, 1, 4, 0x2070, 0x150},
	/*
	 * 16K byte page
	 */
	{16384, 512, 1, 0, 0, 0x4460, 0x60},
	{16384, 512, 4, 1, 3, 0x43f0, 0xD0},
	{16384, 512, 8, 1, 2, 0x4320, 0x1A0},
	{16384, 512, 12, 1, 1, 0x4250, 0x270},
	{16384, 512, 16, 1, 0, 0x4180, 0x340},
	{16384, 1024, 24, 1, 4, 0x4220, 0x2A0}
};

static struct nand_ecclayout ondie_nand_oob_64 = {
	.eccbytes = 32,
	.eccpos = {
		8, 9, 10, 11, 12, 13, 14, 15,
		24, 25, 26, 27, 28, 29, 30, 31,
		40, 41, 42, 43, 44, 45, 46, 47,
		56, 57, 58, 59, 60, 61, 62, 63
	},
	.oobfree = {
		{ .offset = 4, .length = 4 },
		{ .offset = 20, .length = 4 },
		{ .offset = 36, .length = 4 },
		{ .offset = 52, .length = 4 }
	}
};

/*
 * bbt descriptors for chips with on-die ECC and
 * chips with 64-byte OOB
 */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = mirror_pattern
};

static u8 buf_data[READ_BUFF_SIZE];
static u32 buf_index;
static struct nand_ecclayout nand_oob;
static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE];

static void arasan_nand_select_chip(struct mtd_info *mtd, int chip)
{
}

static void arasan_nand_enable_ecc(void)
{
	u32 reg_val;

	reg_val = readl(&arasan_nand_base->cmd_reg);
	reg_val |= ARASAN_NAND_CMD_ECC_ON_MASK;
	writel(reg_val, &arasan_nand_base->cmd_reg);
}
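
/*
 * Map the command's address-cycle type to the number of address cycles
 * actually issued: fixed values for NONE/ONE, and the row/column cycle
 * counts reported by the chip's ONFI parameter page for the others.
 */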
static u8 arasan_nand_get_addrcycle(struct mtd_info *mtd)
{
	u8 addrcycles;
	struct nand_chip *chip = mtd_to_nand(mtd);

	switch (curr_cmd->addr_cycles) {
	case NAND_ADDR_CYCL_NONE:
		addrcycles = 0;
		break;
	case NAND_ADDR_CYCL_ONE:
		addrcycles = 1;
		break;
	case NAND_ADDR_CYCL_ROW:
		addrcycles = chip->onfi_params.addr_cycles &
			     ARASAN_NAND_ROW_ADDR_CYCL_MASK;
		break;
	case NAND_ADDR_CYCL_COL:
		addrcycles = (chip->onfi_params.addr_cycles &
			      ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
			      ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
		break;
	case NAND_ADDR_CYCL_BOTH:
		addrcycles = chip->onfi_params.addr_cycles &
			     ARASAN_NAND_ROW_ADDR_CYCL_MASK;
		addrcycles += (chip->onfi_params.addr_cycles &
			       ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
			       ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
		break;
	default:
		addrcycles = ARASAN_NAND_INVALID_ADDR_CYCL;
		break;
	}

	return addrcycles;
}
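
/*
 * Core page-read path: the page is transferred as a sequence of 512- or
 * 1024-byte packets through the controller's data port.  For each packet
 * the code polls the "buffer read ready" interrupt status, drains the
 * FIFO, and finally waits for "transfer complete".  When controller ECC
 * is in use (no on-die ECC), the ECC engine is enabled and the ECC error
 * status bits are checked at the end and reported as read failures.
 */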
static int arasan_nand_read_page(struct mtd_info *mtd, u8 *buf, u32 size)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct arasan_nand_info *nand = nand_get_controller_data(chip);
	u32 reg_val, i, pktsize, pktnum;
	u32 *bufptr = (u32 *)buf;
	u32 timeout;
	u32 rdcount = 0;
	u8 addr_cycles;

	if (chip->ecc_step_ds >= ARASAN_NAND_PKTSIZE_1K)
		pktsize = ARASAN_NAND_PKTSIZE_1K;
	else
		pktsize = ARASAN_NAND_PKTSIZE_512;

	if (size % pktsize)
		pktnum = size/pktsize + 1;
	else
		pktnum = size/pktsize;

	reg_val = readl(&arasan_nand_base->intsts_enr);
	reg_val |= ARASAN_NAND_INT_STS_ERR_EN_MASK |
		   ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK;
	writel(reg_val, &arasan_nand_base->intsts_enr);

	reg_val = readl(&arasan_nand_base->pkt_reg);
	reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
		     ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
	reg_val |= (pktnum << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) |
		   pktsize;
	writel(reg_val, &arasan_nand_base->pkt_reg);

	if (!nand->on_die_ecc_enabled) {
		arasan_nand_enable_ecc();
		addr_cycles = arasan_nand_get_addrcycle(mtd);
		if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
			return ERR_ADDR_CYCLE;

		writel((NAND_CMD_RNDOUTSTART << ARASAN_NAND_CMD_CMD2_SHIFT) |
		       NAND_CMD_RNDOUT | (addr_cycles <<
		       ARASAN_NAND_CMD_ADDR_CYCL_SHIFT),
		       &arasan_nand_base->ecc_sprcmd_reg);
	}
	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (rdcount < pktnum) {
		timeout = ARASAN_NAND_POLL_TIMEOUT;
		while (!(readl(&arasan_nand_base->intsts_reg) &
			ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK) && timeout) {
			udelay(1);
			timeout--;
		}
		if (!timeout) {
			puts("arasan_read_page: timedout:Buff RDY\n");
			return -ETIMEDOUT;
		}

		rdcount++;

		if (pktnum == rdcount) {
			reg_val = readl(&arasan_nand_base->intsts_enr);
			reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
			writel(reg_val, &arasan_nand_base->intsts_enr);
		} else {
			reg_val = readl(&arasan_nand_base->intsts_enr);
			writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
			       &arasan_nand_base->intsts_enr);
		}
		reg_val = readl(&arasan_nand_base->intsts_reg);
		writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
		       &arasan_nand_base->intsts_reg);

		for (i = 0; i < pktsize/4; i++)
			bufptr[i] = readl(&arasan_nand_base->buf_dataport);

		bufptr += pktsize/4;

		if (rdcount >= pktnum)
			break;

		writel(ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
		       &arasan_nand_base->intsts_enr);
	}

	timeout = ARASAN_NAND_POLL_TIMEOUT;
	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}
	if (!timeout) {
		puts("arasan rd_page timedout:Xfer CMPLT\n");
		return -ETIMEDOUT;
	}

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);

	if (!nand->on_die_ecc_enabled) {
		if (readl(&arasan_nand_base->intsts_reg) &
		    ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK) {
			printf("arasan rd_page:sbiterror\n");
			return -1;
		}

		if (readl(&arasan_nand_base->intsts_reg) &
		    ARASAN_NAND_INT_STS_ERR_EN_MASK) {
			mtd->ecc_stats.failed++;
			printf("arasan rd_page:multibiterror\n");
			return -1;
		}
	}

	return 0;
}

static int arasan_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, u8 *buf, int oob_required, int page)
{
	int status;

	status = arasan_nand_read_page(mtd, buf, (mtd->writesize));

	if (oob_required)
		chip->ecc.read_oob(mtd, chip, page);

	return status;
}
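
/*
 * Feed a (possibly unaligned) buffer to the write FIFO: leading bytes are
 * pushed with byte/halfword accesses until the pointer is 32-bit aligned,
 * the bulk is written as words, and the tail is flushed the same way.
 */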
static void arasan_nand_fill_tx(const u8 *buf, int len)
{
	u32 __iomem *nand = &arasan_nand_base->buf_dataport;

	if (((unsigned long)buf & 0x3) != 0) {
		if (((unsigned long)buf & 0x1) != 0) {
			if (len) {
				writeb(*buf, nand);
				buf += 1;
				len--;
			}
		}

		if (((unsigned long)buf & 0x3) != 0) {
			if (len >= 2) {
				writew(*(u16 *)buf, nand);
				buf += 2;
				len -= 2;
			}
		}
	}

	while (len >= 4) {
		writel(*(u32 *)buf, nand);
		buf += 4;
		len -= 4;
	}

	if (len) {
		if (len >= 2) {
			writew(*(u16 *)buf, nand);
			buf += 2;
			len -= 2;
		}

		if (len)
			writeb(*buf, nand);
	}
}
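
/*
 * Program one page: configure the packet count/size, enable controller
 * ECC (unless on-die ECC is active), then push the data packet by packet,
 * polling "buffer write ready" before each packet and "transfer complete"
 * at the end.  OOB data, if requested, is written with a separate
 * write_oob cycle on the page recorded by the preceding SEQIN command.
 */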
static int arasan_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const u8 *buf, int oob_required,
		int page)
{
	u32 reg_val, i, pktsize, pktnum;
	const u32 *bufptr = (const u32 *)buf;
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
	u32 size = mtd->writesize;
	u32 rdcount = 0;
	u8 column_addr_cycles;
	struct arasan_nand_info *nand = nand_get_controller_data(chip);

	if (chip->ecc_step_ds >= ARASAN_NAND_PKTSIZE_1K)
		pktsize = ARASAN_NAND_PKTSIZE_1K;
	else
		pktsize = ARASAN_NAND_PKTSIZE_512;

	if (size % pktsize)
		pktnum = size/pktsize + 1;
	else
		pktnum = size/pktsize;

	reg_val = readl(&arasan_nand_base->pkt_reg);
	reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
		     ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
	reg_val |= (pktnum << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | pktsize;
	writel(reg_val, &arasan_nand_base->pkt_reg);

	if (!nand->on_die_ecc_enabled) {
		arasan_nand_enable_ecc();
		column_addr_cycles = (chip->onfi_params.addr_cycles &
				      ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
				      ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
		writel((NAND_CMD_RNDIN | (column_addr_cycles << 28)),
		       &arasan_nand_base->ecc_sprcmd_reg);
	}
	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (rdcount < pktnum) {
		timeout = ARASAN_NAND_POLL_TIMEOUT;
		while (!(readl(&arasan_nand_base->intsts_reg) &
			ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK) && timeout) {
			udelay(1);
			timeout--;
		}

		if (!timeout) {
			puts("arasan_write_page: timedout:Buff RDY\n");
			return -ETIMEDOUT;
		}

		rdcount++;

		if (pktnum == rdcount) {
			reg_val = readl(&arasan_nand_base->intsts_enr);
			reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
			writel(reg_val, &arasan_nand_base->intsts_enr);
		} else {
			reg_val = readl(&arasan_nand_base->intsts_enr);
			writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
			       &arasan_nand_base->intsts_enr);
		}

		reg_val = readl(&arasan_nand_base->intsts_reg);
		writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
		       &arasan_nand_base->intsts_reg);

		for (i = 0; i < pktsize/4; i++)
			writel(bufptr[i], &arasan_nand_base->buf_dataport);

		bufptr += pktsize/4;

		if (rdcount >= pktnum)
			break;

		writel(ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
		       &arasan_nand_base->intsts_enr);
	}

	timeout = ARASAN_NAND_POLL_TIMEOUT;
	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}
	if (!timeout) {
		puts("arasan write_page timedout:Xfer CMPLT\n");
		return -ETIMEDOUT;
	}

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);

	if (oob_required)
		chip->ecc.write_oob(mtd, chip, nand->page);

	return 0;
}

static int arasan_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, (mtd->oobsize));

	return 0;
}

static int arasan_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	int status = 0;
	const u8 *buf = chip->oob_poi;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, buf, mtd->oobsize);

	return status;
}
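
/*
 * Issue the RESET command through the command/program registers and poll
 * the "transfer complete" status bit until the controller reports that
 * the cycle has finished, or the poll timeout expires.
 */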
static int arasan_nand_reset(struct arasan_nand_command_format *curr_cmd)
{
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
	u32 cmd_reg = 0;

	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	cmd_reg = readl(&arasan_nand_base->cmd_reg);
	cmd_reg &= ~ARASAN_NAND_CMD_CMD12_MASK;
	cmd_reg |= curr_cmd->cmd1 |
		   (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
	writel(cmd_reg, &arasan_nand_base->cmd_reg);
	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}
	if (!timeout) {
		printf("ERROR:%s timedout\n", __func__);
		return -ETIMEDOUT;
	}

	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);

	return 0;
}

static u8 arasan_nand_page(struct mtd_info *mtd)
{
	u8 page_val = 0;

	switch (mtd->writesize) {
	case 512:
		page_val = 0;
		break;
	case 2048:
		page_val = 1;
		break;
	case 4096:
		page_val = 2;
		break;
	case 8192:
		page_val = 3;
		break;
	case 16384:
		page_val = 4;
		break;
	case 1024:
		page_val = 5;
		break;
	default:
		printf("%s:Pagesize>16K\n", __func__);
		break;
	}

	return page_val;
}
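
/*
 * Set up the command register, address cycles and memory address
 * registers for a write-type command (SEQIN/SET_FEATURES); the data
 * itself is transferred later via write_buf/write_page.
 */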
static int arasan_nand_send_wrcmd(struct arasan_nand_command_format *curr_cmd,
			int column, int page_addr, struct mtd_info *mtd)
{
	u32 reg_val, page;
	u8 page_val, addr_cycles;

	writel(ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->cmd_reg);
	reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
	reg_val |= curr_cmd->cmd1 |
		   (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
	if (curr_cmd->cmd1 == NAND_CMD_SEQIN) {
		reg_val &= ~ARASAN_NAND_CMD_PG_SIZE_MASK;
		page_val = arasan_nand_page(mtd);
		reg_val |= (page_val << ARASAN_NAND_CMD_PG_SIZE_SHIFT);
	}

	reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
	addr_cycles = arasan_nand_get_addrcycle(mtd);
	if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
		return ERR_ADDR_CYCLE;

	reg_val |= (addr_cycles <<
		    ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
	writel(reg_val, &arasan_nand_base->cmd_reg);

	if (page_addr == -1)
		page_addr = 0;

	page = (page_addr << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
	       ARASAN_NAND_MEM_ADDR1_PAGE_MASK;
	column &= ARASAN_NAND_MEM_ADDR1_COL_MASK;
	writel(page|column, &arasan_nand_base->memadr_reg1);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
	reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
	writel(reg_val, &arasan_nand_base->memadr_reg2);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_CS_MASK;
	writel(reg_val, &arasan_nand_base->memadr_reg2);

	return 0;
}

static void arasan_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
	u32 reg_val;
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;

	reg_val = readl(&arasan_nand_base->pkt_reg);
	reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
		     ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
	reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | len;
	writel(reg_val, &arasan_nand_base->pkt_reg);

	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK) && timeout) {
		udelay(1);
		timeout--;
	}

	if (!timeout)
		puts("ERROR:arasan_nand_write_buf timedout:Buff RDY\n");

	reg_val = readl(&arasan_nand_base->intsts_enr);
	reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
	writel(reg_val, &arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
	       &arasan_nand_base->intsts_enr);

	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
	       &arasan_nand_base->intsts_reg);

	arasan_nand_fill_tx(buf, len);

	timeout = ARASAN_NAND_POLL_TIMEOUT;
	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}

	if (!timeout)
		puts("ERROR:arasan_nand_write_buf timedout:Xfer CMPLT\n");

	writel(readl(&arasan_nand_base->intsts_enr) |
	       ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	writel(readl(&arasan_nand_base->intsts_reg) |
	       ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);
}
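
/*
 * Block erase: program the ERASE1/ERASE2 command pair, load the row
 * address into the memory address registers, trigger the cycle and poll
 * for "transfer complete".
 */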
static int arasan_nand_erase(struct arasan_nand_command_format *curr_cmd,
			int column, int page_addr, struct mtd_info *mtd)
{
	u32 reg_val, page;
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
	u8 row_addr_cycles;

	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->cmd_reg);
	reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
	reg_val |= curr_cmd->cmd1 |
		   (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
	row_addr_cycles = arasan_nand_get_addrcycle(mtd);
	if (row_addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
		return ERR_ADDR_CYCLE;

	reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
	reg_val |= (row_addr_cycles <<
		    ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
	writel(reg_val, &arasan_nand_base->cmd_reg);

	page = (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
	       ARASAN_NAND_MEM_ADDR1_COL_MASK;
	column = page_addr & ARASAN_NAND_MEM_ADDR1_COL_MASK;
	writel(column | (page << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT),
	       &arasan_nand_base->memadr_reg1);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
	reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
	writel(reg_val, &arasan_nand_base->memadr_reg2);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_CS_MASK;
	writel(reg_val, &arasan_nand_base->memadr_reg2);
	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}
	if (!timeout) {
		printf("ERROR:%s timedout:Xfer CMPLT\n", __func__);
		return -ETIMEDOUT;
	}

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);

	return 0;
}

static int arasan_nand_read_status(struct arasan_nand_command_format *curr_cmd,
				int column, int page_addr, struct mtd_info *mtd)
{
	u32 reg_val;
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
	u8 addr_cycles;

	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->cmd_reg);
	reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
	reg_val |= curr_cmd->cmd1 |
		   (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
	addr_cycles = arasan_nand_get_addrcycle(mtd);
	if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
		return ERR_ADDR_CYCLE;

	reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
	reg_val |= (addr_cycles <<
		    ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
	writel(reg_val, &arasan_nand_base->cmd_reg);

	reg_val = readl(&arasan_nand_base->pkt_reg);
	reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
		     ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
	reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | 1;
	writel(reg_val, &arasan_nand_base->pkt_reg);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_CS_MASK;
	writel(reg_val, &arasan_nand_base->memadr_reg2);

	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}

	if (!timeout) {
		printf("ERROR:%s: timedout:Xfer CMPLT\n", __func__);
		return -ETIMEDOUT;
	}

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);

	return 0;
}
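
/*
 * Program the command, page size, address cycles and memory address for a
 * read-type command (READ0/READID/PARAM/RNDOUT/GET_FEATURES); the data is
 * fetched afterwards through read_buf/read_page.  The ECC enable bit in
 * the command register is cleared at this point.
 */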
static int arasan_nand_send_rdcmd(struct arasan_nand_command_format *curr_cmd,
			int column, int page_addr, struct mtd_info *mtd)
{
	u32 reg_val, addr_cycles, page;
	u8 page_val;

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
	       &arasan_nand_base->intsts_enr);

	reg_val = readl(&arasan_nand_base->cmd_reg);
	reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
	reg_val |= curr_cmd->cmd1 |
		   (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
	if (curr_cmd->cmd1 == NAND_CMD_RNDOUT ||
	    curr_cmd->cmd1 == NAND_CMD_READ0) {
		reg_val &= ~ARASAN_NAND_CMD_PG_SIZE_MASK;
		page_val = arasan_nand_page(mtd);
		reg_val |= (page_val << ARASAN_NAND_CMD_PG_SIZE_SHIFT);
	}

	reg_val &= ~ARASAN_NAND_CMD_ECC_ON_MASK;
	reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;

	addr_cycles = arasan_nand_get_addrcycle(mtd);
	if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
		return ERR_ADDR_CYCLE;

	reg_val |= (addr_cycles << 28);
	writel(reg_val, &arasan_nand_base->cmd_reg);

	if (page_addr == -1)
		page_addr = 0;

	page = (page_addr << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
	       ARASAN_NAND_MEM_ADDR1_PAGE_MASK;
	column &= ARASAN_NAND_MEM_ADDR1_COL_MASK;
	writel(page | column, &arasan_nand_base->memadr_reg1);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
	reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
	writel(reg_val, &arasan_nand_base->memadr_reg2);

	reg_val = readl(&arasan_nand_base->memadr_reg2);
	reg_val &= ~ARASAN_NAND_MEM_ADDR2_CS_MASK;
	writel(reg_val, &arasan_nand_base->memadr_reg2);
	buf_index = 0;

	return 0;
}

static void arasan_nand_read_buf(struct mtd_info *mtd, u8 *buf, int size)
{
	u32 reg_val, i;
	u32 *bufptr = (u32 *)buf;
	u32 timeout = ARASAN_NAND_POLL_TIMEOUT;

	reg_val = readl(&arasan_nand_base->pkt_reg);
	reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
		     ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
	reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | size;
	writel(reg_val, &arasan_nand_base->pkt_reg);

	writel(curr_cmd->pgm, &arasan_nand_base->pgm_reg);

	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK) && timeout) {
		udelay(1);
		timeout--;
	}

	if (!timeout)
		puts("ERROR:arasan_nand_read_buf timedout:Buff RDY\n");

	reg_val = readl(&arasan_nand_base->intsts_enr);
	reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
	writel(reg_val, &arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
	       &arasan_nand_base->intsts_enr);

	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
	       &arasan_nand_base->intsts_reg);

	buf_index = 0;
	for (i = 0; i < size / 4; i++)
		bufptr[i] = readl(&arasan_nand_base->buf_dataport);

	if (size & 0x03)
		bufptr[i] = readl(&arasan_nand_base->buf_dataport);

	timeout = ARASAN_NAND_POLL_TIMEOUT;
	while (!(readl(&arasan_nand_base->intsts_reg) &
		ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
		udelay(1);
		timeout--;
	}

	if (!timeout)
		puts("ERROR:arasan_nand_read_buf timedout:Xfer CMPLT\n");

	reg_val = readl(&arasan_nand_base->intsts_enr);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);
	reg_val = readl(&arasan_nand_base->intsts_reg);
	writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_reg);
}
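
/*
 * Byte-wise reads are served from a local buffer: on the first call after
 * a command, the expected amount of data (ID bytes, ONFI parameter page,
 * feature bytes, ...) is pulled in with read_buf; NAND_CMD_STATUS is read
 * directly from the flash status register instead.
 */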
static u8 arasan_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u32 size;
	u8 val;
	struct nand_onfi_params *p;

	if (buf_index == 0) {
		p = &chip->onfi_params;
		if (curr_cmd->cmd1 == NAND_CMD_READID)
			size = 4;
		else if (curr_cmd->cmd1 == NAND_CMD_PARAM)
			size = sizeof(struct nand_onfi_params);
		else if (curr_cmd->cmd1 == NAND_CMD_RNDOUT)
			size = le16_to_cpu(p->ext_param_page_length) * 16;
		else if (curr_cmd->cmd1 == NAND_CMD_GET_FEATURES)
			size = 4;
		else if (curr_cmd->cmd1 == NAND_CMD_STATUS)
			return readb(&arasan_nand_base->flash_sts_reg);
		else
			size = 8;
		chip->read_buf(mtd, &buf_data[0], size);
	}

	val = *(&buf_data[0] + buf_index);
	buf_index++;

	return val;
}
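
/*
 * cmdfunc hook: translate the MTD command opcode into the matching entry
 * of arasan_nand_commands[] and dispatch it to the reset/read/write/
 * erase/status helpers.  READOOB on large-page devices is turned into a
 * READ0 at a column offset past the main data area.
 */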
static void arasan_nand_cmd_function(struct mtd_info *mtd, unsigned int command,
				     int column, int page_addr)
{
	u32 i, ret = 0;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct arasan_nand_info *nand = nand_get_controller_data(chip);

	curr_cmd = NULL;
	writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
	       &arasan_nand_base->intsts_enr);

	if ((command == NAND_CMD_READOOB) &&
	    (mtd->writesize > 512)) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Get the command format */
	for (i = 0; (arasan_nand_commands[i].cmd1 != NAND_CMD_NONE ||
		     arasan_nand_commands[i].cmd2 != NAND_CMD_NONE); i++) {
		if (command == arasan_nand_commands[i].cmd1) {
			curr_cmd = &arasan_nand_commands[i];
			break;
		}
	}

	if (curr_cmd == NULL) {
		printf("Unsupported Command; 0x%x\n", command);
		return;
	}

	if (curr_cmd->cmd1 == NAND_CMD_RESET)
		ret = arasan_nand_reset(curr_cmd);

	if ((curr_cmd->cmd1 == NAND_CMD_READID) ||
	    (curr_cmd->cmd1 == NAND_CMD_PARAM) ||
	    (curr_cmd->cmd1 == NAND_CMD_RNDOUT) ||
	    (curr_cmd->cmd1 == NAND_CMD_GET_FEATURES) ||
	    (curr_cmd->cmd1 == NAND_CMD_READ0))
		ret = arasan_nand_send_rdcmd(curr_cmd, column, page_addr, mtd);

	if ((curr_cmd->cmd1 == NAND_CMD_SET_FEATURES) ||
	    (curr_cmd->cmd1 == NAND_CMD_SEQIN)) {
		nand->page = page_addr;
		ret = arasan_nand_send_wrcmd(curr_cmd, column, page_addr, mtd);
	}

	if (curr_cmd->cmd1 == NAND_CMD_ERASE1)
		ret = arasan_nand_erase(curr_cmd, column, page_addr, mtd);

	if (curr_cmd->cmd1 == NAND_CMD_STATUS)
		ret = arasan_nand_read_status(curr_cmd, column, page_addr, mtd);

	if (ret != 0)
		printf("ERROR:%s:command:0x%x\n", __func__, curr_cmd->cmd1);
}
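
/*
 * Detect Micron devices that provide on-die ECC, try to enable it via
 * SET_FEATURES/GET_FEATURES at the on-die ECC feature address, and point
 * the driver at the flash-based BBT pattern descriptors for these parts.
 */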
static void arasan_check_ondie(struct mtd_info *mtd)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);
	struct arasan_nand_info *nand = nand_get_controller_data(nand_chip);
	u8 maf_id, dev_id;
	u8 get_feature[4];
	u8 set_feature[4] = {ENABLE_ONDIE_ECC, 0x00, 0x00, 0x00};
	u32 i;

	/* Send the command for reading device ID */
	nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0, -1);

	/* Read manufacturer and device IDs */
	maf_id = nand_chip->read_byte(mtd);
	dev_id = nand_chip->read_byte(mtd);

	if ((maf_id == NAND_MFR_MICRON) &&
	    ((dev_id == 0xf1) || (dev_id == 0xa1) || (dev_id == 0xb1) ||
	     (dev_id == 0xaa) || (dev_id == 0xba) || (dev_id == 0xda) ||
	     (dev_id == 0xca) || (dev_id == 0xac) || (dev_id == 0xbc) ||
	     (dev_id == 0xdc) || (dev_id == 0xcc) || (dev_id == 0xa3) ||
	     (dev_id == 0xb3) || (dev_id == 0xd3) || (dev_id == 0xc3))) {
		nand_chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES,
				   ONDIE_ECC_FEATURE_ADDR, -1);
		nand_chip->write_buf(mtd, &set_feature[0], 4);
		nand_chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES,
				   ONDIE_ECC_FEATURE_ADDR, -1);

		for (i = 0; i < 4; i++)
			get_feature[i] = nand_chip->read_byte(mtd);

		if (get_feature[0] & ENABLE_ONDIE_ECC)
			nand->on_die_ecc_enabled = true;
		else
			printf("%s: Unable to enable OnDie ECC\n", __func__);

		/* Use the BBT pattern descriptors */
		nand_chip->bbt_td = &bbt_main_descr;
		nand_chip->bbt_md = &bbt_mirror_descr;
	}
}
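
/*
 * Select a controller ECC configuration from ecc_matrix[]: the entry must
 * match the page size and cover the chip's ECC step size, preferring one
 * that also meets the required ECC strength.  The chosen geometry is
 * programmed into the ECC registers and exported through the OOB layout.
 */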
static int arasan_nand_ecc_init(struct mtd_info *mtd)
{
	int found = -1;
	u32 regval, eccpos_start, i, eccaddr;
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	for (i = 0; i < ARRAY_SIZE(ecc_matrix); i++) {
		if ((ecc_matrix[i].pagesize == mtd->writesize) &&
		    (ecc_matrix[i].ecc_codeword_size >=
		     nand_chip->ecc_step_ds)) {
			if (ecc_matrix[i].eccbits >=
			    nand_chip->ecc_strength_ds) {
				found = i;
				break;
			}
			found = i;
		}
	}

	if (found < 0)
		return 1;

	eccaddr = mtd->writesize + mtd->oobsize -
		  ecc_matrix[found].eccsize;

	regval = eccaddr |
		 (ecc_matrix[found].eccsize << ARASAN_NAND_ECC_SIZE_SHIFT) |
		 (ecc_matrix[found].bch << ARASAN_NAND_ECC_BCH_SHIFT);
	writel(regval, &arasan_nand_base->ecc_reg);

	if (ecc_matrix[found].bch) {
		regval = readl(&arasan_nand_base->memadr_reg2);
		regval &= ~ARASAN_NAND_MEM_ADDR2_BCH_MASK;
		regval |= (ecc_matrix[found].bchval <<
			   ARASAN_NAND_MEM_ADDR2_BCH_SHIFT);
		writel(regval, &arasan_nand_base->memadr_reg2);
	}

	nand_oob.eccbytes = ecc_matrix[found].eccsize;
	eccpos_start = mtd->oobsize - nand_oob.eccbytes;

	for (i = 0; i < nand_oob.eccbytes; i++)
		nand_oob.eccpos[i] = eccpos_start + i;

	nand_oob.oobfree[0].offset = 2;
	nand_oob.oobfree[0].length = eccpos_start - 2;

	nand_chip->ecc.size = ecc_matrix[found].ecc_codeword_size;
	nand_chip->ecc.strength = ecc_matrix[found].eccbits;
	nand_chip->ecc.bytes = ecc_matrix[found].eccsize;
	nand_chip->ecc.layout = &nand_oob;

	return 0;
}
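
/*
 * Probe and register one chip: hook up the driver's cmdfunc/read/write
 * callbacks, identify the device with nand_scan_ident(), prefer on-die
 * ECC when the chip supports it (otherwise configure controller ECC),
 * then finish with nand_scan_tail() and nand_register().
 */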
static int arasan_nand_init(struct nand_chip *nand_chip, int devnum)
{
	struct arasan_nand_info *nand;
	struct mtd_info *mtd;
	int err = -1;

	nand = calloc(1, sizeof(struct arasan_nand_info));
	if (!nand) {
		printf("%s: failed to allocate\n", __func__);
		return err;
	}

	nand->nand_base = arasan_nand_base;
	mtd = nand_to_mtd(nand_chip);
	nand_set_controller_data(nand_chip, nand);

	/* Set the driver entry points for MTD */
	nand_chip->cmdfunc = arasan_nand_cmd_function;
	nand_chip->select_chip = arasan_nand_select_chip;
	nand_chip->read_byte = arasan_nand_read_byte;

	/* Buffer read/write routines */
	nand_chip->read_buf = arasan_nand_read_buf;
	nand_chip->write_buf = arasan_nand_write_buf;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH;

	writel(0x0, &arasan_nand_base->cmd_reg);
	writel(0x0, &arasan_nand_base->pgm_reg);

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, 1, NULL)) {
		printf("%s: nand_scan_ident failed\n", __func__);
		goto fail;
	}

	nand_chip->ecc.mode = NAND_ECC_HW;
	nand_chip->ecc.hwctl = NULL;
	nand_chip->ecc.read_page = arasan_nand_read_page_hwecc;
	nand_chip->ecc.write_page = arasan_nand_write_page_hwecc;
	nand_chip->ecc.read_oob = arasan_nand_read_oob;
	nand_chip->ecc.write_oob = arasan_nand_write_oob;

	arasan_check_ondie(mtd);

	/*
	 * If on-die ECC is supported, give it priority and use it
	 * instead of the controller ECC.
	 */
	if (nand->on_die_ecc_enabled) {
		nand_chip->ecc.strength = 1;
		nand_chip->ecc.size = mtd->writesize;
		nand_chip->ecc.bytes = 0;
		nand_chip->ecc.layout = &ondie_nand_oob_64;
	} else {
		if (arasan_nand_ecc_init(mtd)) {
			printf("%s: nand_ecc_init failed\n", __func__);
			goto fail;
		}
	}

	if (nand_scan_tail(mtd)) {
		printf("%s: nand_scan_tail failed\n", __func__);
		goto fail;
	}

	if (nand_register(devnum, mtd)) {
		printf("Nand Register Fail\n");
		goto fail;
	}

	return 0;
fail:
	free(nand);
	return err;
}

void board_nand_init(void)
{
	struct nand_chip *nand = &nand_chip[0];

	if (arasan_nand_init(nand, 0))
		puts("NAND init failed\n");
}