tifm_sd.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  tifm_sd.c - TI FlashMedia driver
 *
 *  Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
 *
 * Special thanks to Brad Campbell for extensive testing of this driver.
 */

#include <linux/tifm.h>
#include <linux/mmc/host.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <asm/io.h>

#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"

static bool no_dma = 0;
static bool fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
/* Constants here are mostly from OMAP5912 datasheet */
#define TIFM_MMCSD_RESET      0x0002
#define TIFM_MMCSD_CLKMASK    0x03ff
#define TIFM_MMCSD_POWER      0x0800
#define TIFM_MMCSD_4BBUS      0x8000
#define TIFM_MMCSD_RXDE       0x8000 /* rx dma enable */
#define TIFM_MMCSD_TXDE       0x0080 /* tx dma enable */
#define TIFM_MMCSD_BUFINT     0x0c00 /* set bits: AE, AF */
#define TIFM_MMCSD_DPE        0x0020 /* data timeout counted in kilocycles */
#define TIFM_MMCSD_INAB       0x0080 /* abort / initialize command */
#define TIFM_MMCSD_READ       0x8000

#define TIFM_MMCSD_ERRMASK    0x01e0 /* set bits: CCRC, CTO, DCRC, DTO */
#define TIFM_MMCSD_EOC        0x0001 /* end of command phase */
#define TIFM_MMCSD_CD         0x0002 /* card detect */
#define TIFM_MMCSD_CB         0x0004 /* card enter busy state */
#define TIFM_MMCSD_BRS        0x0008 /* block received/sent */
#define TIFM_MMCSD_EOFB       0x0010 /* card exit busy state */
#define TIFM_MMCSD_DTO        0x0020 /* data time-out */
#define TIFM_MMCSD_DCRC       0x0040 /* data crc error */
#define TIFM_MMCSD_CTO        0x0080 /* command time-out */
#define TIFM_MMCSD_CCRC       0x0100 /* command crc error */
#define TIFM_MMCSD_AF         0x0400 /* fifo almost full */
#define TIFM_MMCSD_AE         0x0800 /* fifo almost empty */
#define TIFM_MMCSD_OCRB       0x1000 /* OCR busy */
#define TIFM_MMCSD_CIRQ       0x2000 /* card irq (cmd40/sdio) */
#define TIFM_MMCSD_CERR       0x4000 /* card status error */

#define TIFM_MMCSD_ODTO       0x0040 /* open drain / extended timeout */
#define TIFM_MMCSD_CARD_RO    0x0200 /* card is read-only */

#define TIFM_MMCSD_FIFO_SIZE  0x0020

#define TIFM_MMCSD_RSP_R0     0x0000
#define TIFM_MMCSD_RSP_R1     0x0100
#define TIFM_MMCSD_RSP_R2     0x0200
#define TIFM_MMCSD_RSP_R3     0x0300
#define TIFM_MMCSD_RSP_R4     0x0400
#define TIFM_MMCSD_RSP_R5     0x0500
#define TIFM_MMCSD_RSP_R6     0x0600
#define TIFM_MMCSD_RSP_BUSY   0x0800

#define TIFM_MMCSD_CMD_BC     0x0000
#define TIFM_MMCSD_CMD_BCR    0x1000
#define TIFM_MMCSD_CMD_AC     0x2000
#define TIFM_MMCSD_CMD_ADTC   0x3000

#define TIFM_MMCSD_MAX_BLOCK_SIZE  0x0800UL

#define TIFM_MMCSD_REQ_TIMEOUT_MS  1000
enum {
	CMD_READY    = 0x0001,
	FIFO_READY   = 0x0002,
	BRS_READY    = 0x0004,
	SCMD_ACTIVE  = 0x0008,
	SCMD_READY   = 0x0010,
	CARD_BUSY    = 0x0020,
	DATA_CARRY   = 0x0040
};

struct tifm_sd {
	struct tifm_dev       *dev;

	unsigned short        eject:1,
	                      open_drain:1,
	                      no_dma:1;
	unsigned short        cmd_flags;

	unsigned int          clk_freq;
	unsigned int          clk_div;
	unsigned long         timeout_jiffies;

	struct tasklet_struct finish_tasklet;
	struct timer_list     timer;
	struct mmc_request    *req;

	int                   sg_len;
	int                   sg_pos;
	unsigned int          block_pos;
	struct scatterlist    bounce_buf;
	unsigned char         bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};
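
/*
 * PIO transfers move data through SOCK_MMCSD_DATA, two bytes per 32-bit
 * register access.  When a chunk ends on an odd byte, the leftover byte is
 * parked in bounce_buf_data[0] and DATA_CARRY is set in cmd_flags so the
 * next chunk can pick it up; tifm_sd_read_fifo() and tifm_sd_write_fifo()
 * below implement both sides of this carry scheme.
 */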

/* for some reason, host won't respond correctly to readw/writew */
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
			      unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg) + off;
	if (host->cmd_flags & DATA_CARRY) {
		buf[pos++] = host->bounce_buf_data[0];
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = readl(sock->addr + SOCK_MMCSD_DATA);
		buf[pos++] = val & 0xff;
		if (pos == cnt) {
			host->bounce_buf_data[0] = (val >> 8) & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		buf[pos++] = (val >> 8) & 0xff;
	}
	kunmap_atomic(buf - off);
}

static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
			       unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg) + off;
	if (host->cmd_flags & DATA_CARRY) {
		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
		writel(val, sock->addr + SOCK_MMCSD_DATA);
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = buf[pos++];
		if (pos == cnt) {
			host->bounce_buf_data[0] = val & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		val |= (buf[pos++] << 8) & 0xff00;
		writel(val, sock->addr + SOCK_MMCSD_DATA);
	}
	kunmap_atomic(buf - off);
}

static void tifm_sd_transfer_data(struct tifm_sd *host)
{
	struct mmc_data *r_data = host->req->cmd->data;
	struct scatterlist *sg = r_data->sg;
	unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
	unsigned int p_off, p_cnt;
	struct page *pg;

	if (host->sg_pos == host->sg_len)
		return;
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len) {
				if ((r_data->flags & MMC_DATA_WRITE)
				    && (host->cmd_flags & DATA_CARRY))
					writel(host->bounce_buf_data[0],
					       host->dev->addr
					       + SOCK_MMCSD_DATA);
				return;
			}
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_READ)
			tifm_sd_read_fifo(host, pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_write_fifo(host, pg, p_off, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}

static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
			      struct page *src, unsigned int src_off,
			      unsigned int count)
{
	unsigned char *src_buf = kmap_atomic(src) + src_off;
	unsigned char *dst_buf = kmap_atomic(dst) + dst_off;

	memcpy(dst_buf, src_buf, count);
	kunmap_atomic(dst_buf - dst_off);
	kunmap_atomic(src_buf - src_off);
}
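
/*
 * Copy one block's worth of data between the request scatterlist and the
 * single-block bounce buffer.  This is used on the DMA path when the tail
 * of a scatterlist entry is shorter than a full block: the partial block is
 * staged in bounce_buf so the controller always sees whole blocks.
 */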

static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct scatterlist *sg = r_data->sg;
	unsigned int t_size = r_data->blksz;
	unsigned int off, cnt;
	unsigned int p_off, p_cnt;
	struct page *pg;

	dev_dbg(&host->dev->dev, "bouncing block\n");
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len)
				return;
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_copy_page(sg_page(&host->bounce_buf),
					  r_data->blksz - t_size,
					  pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_READ)
			tifm_sd_copy_page(pg, p_off,
					  sg_page(&host->bounce_buf),
					  r_data->blksz - t_size, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}
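
/*
 * Program the next DMA transfer.  Up to TIFM_DMA_TSIZE blocks are taken
 * from the current scatterlist entry; a residue smaller than one block is
 * routed through the bounce buffer instead (copied in here for writes,
 * marked with DATA_CARRY and copied out on the next call for reads).
 * Returns 1 once the whole scatterlist has been consumed and no further
 * transfer was started, 0 otherwise.
 */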

static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
	unsigned int dma_len, dma_blk_cnt, dma_off;
	struct scatterlist *sg = NULL;
	unsigned long flags;

	if (host->sg_pos == host->sg_len)
		return 1;

	if (host->cmd_flags & DATA_CARRY) {
		host->cmd_flags &= ~DATA_CARRY;
		local_irq_save(flags);
		tifm_sd_bounce_block(host, r_data);
		local_irq_restore(flags);
		if (host->sg_pos == host->sg_len)
			return 1;
	}

	dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
	if (!dma_len) {
		host->block_pos = 0;
		host->sg_pos++;
		if (host->sg_pos == host->sg_len)
			return 1;
		dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
	}

	if (dma_len < t_size) {
		dma_blk_cnt = dma_len / r_data->blksz;
		dma_off = host->block_pos;
		host->block_pos += dma_blk_cnt * r_data->blksz;
	} else {
		dma_blk_cnt = TIFM_DMA_TSIZE;
		dma_off = host->block_pos;
		host->block_pos += t_size;
	}

	if (dma_blk_cnt)
		sg = &r_data->sg[host->sg_pos];
	else if (dma_len) {
		if (r_data->flags & MMC_DATA_WRITE) {
			local_irq_save(flags);
			tifm_sd_bounce_block(host, r_data);
			local_irq_restore(flags);
		} else
			host->cmd_flags |= DATA_CARRY;

		sg = &host->bounce_buf;
		dma_off = 0;
		dma_blk_cnt = 1;
	} else
		return 1;

	dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
	writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
	if (r_data->flags & MMC_DATA_WRITE)
		writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);
	else
		writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);

	return 0;
}

static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
{
	unsigned int rc = 0;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		rc |= TIFM_MMCSD_RSP_R0;
		break;
	case MMC_RSP_R1B:
		rc |= TIFM_MMCSD_RSP_BUSY;
		fallthrough;
	case MMC_RSP_R1:
		rc |= TIFM_MMCSD_RSP_R1;
		break;
	case MMC_RSP_R2:
		rc |= TIFM_MMCSD_RSP_R2;
		break;
	case MMC_RSP_R3:
		rc |= TIFM_MMCSD_RSP_R3;
		break;
	default:
		BUG();
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		rc |= TIFM_MMCSD_CMD_BC;
		break;
	case MMC_CMD_BCR:
		rc |= TIFM_MMCSD_CMD_BCR;
		break;
	case MMC_CMD_AC:
		rc |= TIFM_MMCSD_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		rc |= TIFM_MMCSD_CMD_ADTC;
		break;
	default:
		BUG();
	}
	return rc;
}

static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
{
	struct tifm_dev *sock = host->dev;
	unsigned int cmd_mask = tifm_sd_op_flags(cmd);

	if (host->open_drain)
		cmd_mask |= TIFM_MMCSD_ODTO;

	if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
		cmd_mask |= TIFM_MMCSD_READ;

	dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
		cmd->opcode, cmd->arg, cmd_mask);

	writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
	writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
	writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
}
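
/*
 * The response registers are 16 bits wide; each 32-bit word of the command
 * response is assembled from two consecutive registers, with the most
 * significant half at the higher offset.
 */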

static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
{
	cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
	cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
	cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
	cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
}
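
/*
 * Decide whether the current request is complete.  A plain command only
 * needs CMD_READY; a data command additionally needs BRS_READY and, on the
 * DMA path, FIFO_READY.  Requests with a stop command issue it from here
 * once the data phase is done, and writes also wait for the card to leave
 * the busy state before the finish tasklet is scheduled.
 */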

static void tifm_sd_check_status(struct tifm_sd *host)
{
	struct tifm_dev *sock = host->dev;
	struct mmc_command *cmd = host->req->cmd;

	if (cmd->error)
		goto finish_request;

	if (!(host->cmd_flags & CMD_READY))
		return;

	if (cmd->data) {
		if (cmd->data->error) {
			if ((host->cmd_flags & SCMD_ACTIVE)
			    && !(host->cmd_flags & SCMD_READY))
				return;

			goto finish_request;
		}

		if (!(host->cmd_flags & BRS_READY))
			return;

		if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
			return;

		if (cmd->data->flags & MMC_DATA_WRITE) {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					writel(TIFM_MMCSD_EOFB
					       | readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY)
					    || (host->cmd_flags & CARD_BUSY))
						return;
					writel((~TIFM_MMCSD_EOFB)
					       & readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
				}
			} else {
				if (host->cmd_flags & CARD_BUSY)
					return;
				writel((~TIFM_MMCSD_EOFB)
				       & readl(sock->addr
					       + SOCK_MMCSD_INT_ENABLE),
				       sock->addr + SOCK_MMCSD_INT_ENABLE);
			}
		} else {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY))
						return;
				}
			}
		}
	}
finish_request:
	tasklet_schedule(&host->finish_tasklet);
}

/* Called from interrupt handler */
static void tifm_sd_data_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int fifo_status = 0;
	struct mmc_data *r_data = NULL;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
	dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
		fifo_status, host->cmd_flags);

	if (host->req) {
		r_data = host->req->cmd->data;

		if (r_data && (fifo_status & TIFM_FIFO_READY)) {
			if (tifm_sd_set_dma_data(host, r_data)) {
				host->cmd_flags |= FIFO_READY;
				tifm_sd_check_status(host);
			}
		}
	}

	writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
	spin_unlock(&sock->lock);
}

/* Called from interrupt handler */
static void tifm_sd_card_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int host_status = 0;
	int cmd_error = 0;
	struct mmc_command *cmd = NULL;
	unsigned long flags;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
	dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
		host_status, host->cmd_flags);

	if (host->req) {
		cmd = host->req->cmd;

		if (host_status & TIFM_MMCSD_ERRMASK) {
			writel(host_status & TIFM_MMCSD_ERRMASK,
			       sock->addr + SOCK_MMCSD_STATUS);
			if (host_status & TIFM_MMCSD_CTO)
				cmd_error = -ETIMEDOUT;
			else if (host_status & TIFM_MMCSD_CCRC)
				cmd_error = -EILSEQ;

			if (cmd->data) {
				if (host_status & TIFM_MMCSD_DTO)
					cmd->data->error = -ETIMEDOUT;
				else if (host_status & TIFM_MMCSD_DCRC)
					cmd->data->error = -EILSEQ;
			}

			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);

			if (host->req->stop) {
				if (host->cmd_flags & SCMD_ACTIVE) {
					host->req->stop->error = cmd_error;
					host->cmd_flags |= SCMD_READY;
				} else {
					cmd->error = cmd_error;
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					goto done;
				}
			} else
				cmd->error = cmd_error;
		} else {
			if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
				if (!(host->cmd_flags & CMD_READY)) {
					host->cmd_flags |= CMD_READY;
					tifm_sd_fetch_resp(cmd, sock);
				} else if (host->cmd_flags & SCMD_ACTIVE) {
					host->cmd_flags |= SCMD_READY;
					tifm_sd_fetch_resp(host->req->stop,
							   sock);
				}
			}
			if (host_status & TIFM_MMCSD_BRS)
				host->cmd_flags |= BRS_READY;
		}

		if (host->no_dma && cmd->data) {
			if (host_status & TIFM_MMCSD_AE)
				writel(host_status & TIFM_MMCSD_AE,
				       sock->addr + SOCK_MMCSD_STATUS);

			if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
					   | TIFM_MMCSD_BRS)) {
				local_irq_save(flags);
				tifm_sd_transfer_data(host);
				local_irq_restore(flags);
				host_status &= ~TIFM_MMCSD_AE;
			}
		}

		if (host_status & TIFM_MMCSD_EOFB)
			host->cmd_flags &= ~CARD_BUSY;
		else if (host_status & TIFM_MMCSD_CB)
			host->cmd_flags |= CARD_BUSY;

		tifm_sd_check_status(host);
	}
done:
	writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
	spin_unlock(&sock->lock);
}
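
/*
 * Convert the core's timeout_ns into card clock cycles and add it to
 * timeout_clks.  If the result does not fit in the 16-bit timeout register,
 * it is rescaled to units of 1024 cycles and TIFM_MMCSD_DPE is set so the
 * controller counts the timeout in kilocycles; if it overflows even then,
 * the timeout is disabled (0 = unlimited).
 */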

static void tifm_sd_set_data_timeout(struct tifm_sd *host,
				     struct mmc_data *data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int data_timeout = data->timeout_clks;

	if (fixed_timeout)
		return;

	data_timeout += data->timeout_ns /
			((1000000000UL / host->clk_freq) * host->clk_div);

	if (data_timeout < 0xffff) {
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel((~TIFM_MMCSD_DPE)
		       & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	} else {
		data_timeout = (data_timeout >> 10) + 1;
		if (data_timeout > 0xffff)
			data_timeout = 0; /* set to unlimited */
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel(TIFM_MMCSD_DPE
		       | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	}
}
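
/*
 * Request entry point.  PIO (no_dma) is used when the no_dma module
 * parameter is set or the block size is not a power of two; otherwise the
 * scatterlist and the bounce buffer are DMA-mapped and the FIFO is set up.
 * The hardware block count and block length registers are programmed with
 * value - 1, and a 1 second software abort timer is armed for the request.
 */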

static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;
	struct mmc_data *r_data = mrq->cmd->data;

	spin_lock_irqsave(&sock->lock, flags);
	if (host->eject) {
		mrq->cmd->error = -ENOMEDIUM;
		goto err_out;
	}

	if (host->req) {
		pr_err("%s : unfinished request detected\n",
		       dev_name(&sock->dev));
		mrq->cmd->error = -ETIMEDOUT;
		goto err_out;
	}

	host->cmd_flags = 0;
	host->block_pos = 0;
	host->sg_pos = 0;

	if (mrq->data && !is_power_of_2(mrq->data->blksz))
		host->no_dma = 1;
	else
		host->no_dma = no_dma ? 1 : 0;

	if (r_data) {
		tifm_sd_set_data_timeout(host, r_data);

		if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
			writel(TIFM_MMCSD_EOFB
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);

		if (host->no_dma) {
			writel(TIFM_MMCSD_BUFINT
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
			writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
			       | (TIFM_MMCSD_FIFO_SIZE - 1),
			       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			host->sg_len = r_data->sg_len;
		} else {
			sg_init_one(&host->bounce_buf, host->bounce_buf_data,
				    r_data->blksz);

			if (1 != tifm_map_sg(sock, &host->bounce_buf, 1,
					     r_data->flags & MMC_DATA_WRITE
					     ? PCI_DMA_TODEVICE
					     : PCI_DMA_FROMDEVICE)) {
				pr_err("%s : scatterlist map failed\n",
				       dev_name(&sock->dev));
				mrq->cmd->error = -ENOMEM;
				goto err_out;
			}
			host->sg_len = tifm_map_sg(sock, r_data->sg,
						   r_data->sg_len,
						   r_data->flags
						   & MMC_DATA_WRITE
						   ? PCI_DMA_TODEVICE
						   : PCI_DMA_FROMDEVICE);
			if (host->sg_len < 1) {
				pr_err("%s : scatterlist map failed\n",
				       dev_name(&sock->dev));
				tifm_unmap_sg(sock, &host->bounce_buf, 1,
					      r_data->flags & MMC_DATA_WRITE
					      ? PCI_DMA_TODEVICE
					      : PCI_DMA_FROMDEVICE);
				mrq->cmd->error = -ENOMEM;
				goto err_out;
			}

			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(ilog2(r_data->blksz) - 2,
			       sock->addr + SOCK_FIFO_PAGE_SIZE);
			writel(TIFM_FIFO_ENABLE,
			       sock->addr + SOCK_FIFO_CONTROL);
			writel(TIFM_FIFO_INTMASK,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);

			if (r_data->flags & MMC_DATA_WRITE)
				writel(TIFM_MMCSD_TXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
			else
				writel(TIFM_MMCSD_RXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			tifm_sd_set_dma_data(host, r_data);
		}

		writel(r_data->blocks - 1,
		       sock->addr + SOCK_MMCSD_NUM_BLOCKS);
		writel(r_data->blksz - 1,
		       sock->addr + SOCK_MMCSD_BLOCK_LEN);
	}

	host->req = mrq;
	mod_timer(&host->timer, jiffies + host->timeout_jiffies);
	writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	tifm_sd_exec(host, mrq->cmd);
	spin_unlock_irqrestore(&sock->lock, flags);
	return;

err_out:
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}
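
/*
 * Finish tasklet.  Buffer interrupts are disabled (PIO) or the DMA mappings
 * are torn down, and the number of bytes transferred is reconstructed from
 * what the controller reports back in SOCK_MMCSD_NUM_BLOCKS and
 * SOCK_MMCSD_BLOCK_LEN (both were programmed with value - 1 when the
 * request was queued).
 */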

static void tifm_sd_end_cmd(unsigned long data)
{
	struct tifm_sd *host = (struct tifm_sd*)data;
	struct tifm_dev *sock = host->dev;
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct mmc_request *mrq;
	struct mmc_data *r_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	del_timer(&host->timer);
	mrq = host->req;
	host->req = NULL;

	if (!mrq) {
		pr_err(" %s : no request to complete?\n",
		       dev_name(&sock->dev));
		spin_unlock_irqrestore(&sock->lock, flags);
		return;
	}

	r_data = mrq->cmd->data;
	if (r_data) {
		if (host->no_dma) {
			writel((~TIFM_MMCSD_BUFINT)
			       & readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
		} else {
			tifm_unmap_sg(sock, &host->bounce_buf, 1,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
			tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		}

		r_data->bytes_xfered = r_data->blocks
			- readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
		r_data->bytes_xfered *= r_data->blksz;
		r_data->bytes_xfered += r_data->blksz
			- readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
	}

	writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}

static void tifm_sd_abort(struct timer_list *t)
{
	struct tifm_sd *host = from_timer(host, t, timer);

	pr_err("%s : card failed to respond for a long period of time "
	       "(%x, %x)\n",
	       dev_name(&host->dev->dev), host->req->cmd->opcode,
	       host->cmd_flags);

	tifm_eject(host->dev);
}
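
/*
 * The MMC/SD clock is derived from either a 20 MHz or a 24 MHz base clock
 * (TIFM_CTRL_FAST_CLK selects the faster one).  A divider is computed for
 * each base so that the resulting frequency does not exceed ios->clock,
 * and the combination that comes closest to the requested rate wins.
 */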

static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned int clk_div1, clk_div2;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
		"chip_select = %x, power_mode = %x, bus_width = %x\n",
		ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
		ios->power_mode, ios->bus_width);

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	} else {
		writel((~TIFM_MMCSD_4BBUS)
		       & readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	}

	if (ios->clock) {
		clk_div1 = 20000000 / ios->clock;
		if (!clk_div1)
			clk_div1 = 1;

		clk_div2 = 24000000 / ios->clock;
		if (!clk_div2)
			clk_div2 = 1;

		if ((20000000 / clk_div1) > ios->clock)
			clk_div1++;
		if ((24000000 / clk_div2) > ios->clock)
			clk_div2++;
		if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
			host->clk_freq = 20000000;
			host->clk_div = clk_div1;
			writel((~TIFM_CTRL_FAST_CLK)
			       & readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		} else {
			host->clk_freq = 24000000;
			host->clk_div = clk_div2;
			writel(TIFM_CTRL_FAST_CLK
			       | readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		}
	} else {
		host->clk_div = 0;
	}

	host->clk_div &= TIFM_MMCSD_CLKMASK;
	writel(host->clk_div
	       | ((~TIFM_MMCSD_CLKMASK)
		  & readl(sock->addr + SOCK_MMCSD_CONFIG)),
	       sock->addr + SOCK_MMCSD_CONFIG);

	host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);

	/* chip_select : maybe later */
	//vdd
	//power is set before probe / after remove

	spin_unlock_irqrestore(&sock->lock, flags);
}

static int tifm_sd_ro(struct mmc_host *mmc)
{
	int rc = 0;
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
		rc = 1;
	spin_unlock_irqrestore(&sock->lock, flags);
	return rc;
}

static const struct mmc_host_ops tifm_sd_ops = {
	.request = tifm_sd_request,
	.set_ios = tifm_sd_ios,
	.get_ro = tifm_sd_ro
};
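
/*
 * Bring the MMC/SD socket function out of reset and send the INAB
 * (initialize/abort) command.  Both wait loops poll with exponentially
 * increasing msleep() intervals, giving up after roughly half a second for
 * the reset and roughly 0.1 s for the initialization command.
 */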

static int tifm_sd_initialize_host(struct tifm_sd *host)
{
	int rc;
	unsigned int host_status = 0;
	struct tifm_dev *sock = host->dev;

	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	host->clk_div = 61;
	host->clk_freq = 20000000;
	writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);

	/* wait up to 0.51 sec for reset */
	for (rc = 32; rc <= 256; rc <<= 1) {
		if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		pr_err("%s : controller failed to reset\n",
		       dev_name(&sock->dev));
		return -ENODEV;
	}

	writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);
	writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

	// command timeout fixed to 64 clocks for now
	writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);

	writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
	for (rc = 16; rc <= 64; rc <<= 1) {
		host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
		writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
		if (!(host_status & TIFM_MMCSD_ERRMASK)
		    && (host_status & TIFM_MMCSD_EOC)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		pr_err("%s : card not ready - probe failed on initialization\n",
		       dev_name(&sock->dev));
		return -ENODEV;
	}

	writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
	       | TIFM_MMCSD_ERRMASK,
	       sock->addr + SOCK_MMCSD_INT_ENABLE);

	return 0;
}

static int tifm_sd_probe(struct tifm_dev *sock)
{
	struct mmc_host *mmc;
	struct tifm_sd *host;
	int rc = -EIO;

	if (!(TIFM_SOCK_STATE_OCCUPIED
	      & readl(sock->addr + SOCK_PRESENT_STATE))) {
		pr_warn("%s : card gone, unexpectedly\n",
			dev_name(&sock->dev));
		return rc;
	}

	mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	tifm_set_drvdata(sock, mmc);
	host->dev = sock;
	host->timeout_jiffies = msecs_to_jiffies(TIFM_MMCSD_REQ_TIMEOUT_MS);
	/*
	 * We use a fixed request timeout of 1s, hence inform the core about it.
	 * A future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;

	tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
		     (unsigned long)host);
	timer_setup(&host->timer, tifm_sd_abort, 0);

	mmc->ops = &tifm_sd_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->f_min = 20000000 / 60;
	mmc->f_max = 24000000;

	mmc->max_blk_count = 2048;
	mmc->max_segs = mmc->max_blk_count;
	mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
	mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_req_size = mmc->max_seg_size;

	sock->card_event = tifm_sd_card_event;
	sock->data_event = tifm_sd_data_event;
	rc = tifm_sd_initialize_host(host);

	if (!rc)
		rc = mmc_add_host(mmc);
	if (!rc)
		return 0;

	mmc_free_host(mmc);
	return rc;
}

static void tifm_sd_remove(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	host->eject = 1;
	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	spin_unlock_irqrestore(&sock->lock, flags);

	tasklet_kill(&host->finish_tasklet);

	spin_lock_irqsave(&sock->lock, flags);
	if (host->req) {
		writel(TIFM_FIFO_INT_SETALL,
		       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
		writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
		host->req->cmd->error = -ENOMEDIUM;
		if (host->req->stop)
			host->req->stop->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_remove_host(mmc);
	dev_dbg(&sock->dev, "after remove\n");

	mmc_free_host(mmc);
}

#ifdef CONFIG_PM

static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
	return 0;
}

static int tifm_sd_resume(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	int rc;

	rc = tifm_sd_initialize_host(host);
	dev_dbg(&sock->dev, "resume initialize %d\n", rc);

	if (rc)
		host->eject = 1;

	return rc;
}

#else

#define tifm_sd_suspend NULL
#define tifm_sd_resume NULL

#endif /* CONFIG_PM */

static struct tifm_device_id tifm_sd_id_tbl[] = {
	{ TIFM_TYPE_SD }, { }
};

static struct tifm_driver tifm_sd_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_sd_id_tbl,
	.probe = tifm_sd_probe,
	.remove = tifm_sd_remove,
	.suspend = tifm_sd_suspend,
	.resume = tifm_sd_resume
};

static int __init tifm_sd_init(void)
{
	return tifm_register_driver(&tifm_sd_driver);
}

static void __exit tifm_sd_exit(void)
{
	tifm_unregister_driver(&tifm_sd_driver);
}

MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia SD driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
MODULE_VERSION(DRIVER_VERSION);

module_init(tifm_sd_init);
module_exit(tifm_sd_exit);