mxs-dcp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64

/*
 * Null hashes to align with hardware behaviour on i.MX6SL and i.MX6ULL;
 * these are byte-flipped for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};
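
/*
 * Note on the layout above: the DCP engine only ever sees the buffers in
 * this block. Request data is copied from the caller's scatterlists into
 * aes_in_buf/sha_in_buf, each buffer is mapped with dma_map_single() for
 * the duration of one operation, and results are copied back out of
 * aes_out_buf/sha_out_buf. aes_key holds the 128-bit key immediately
 * followed by the CBC IV, which is why it is sized 2 * AES_KEYSIZE_128.
 */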

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};

/*
 * There can only ever be one instance of the MXS DCP, due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
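
/*
 * Illustrative sketch (not part of the driver flow): a one-shot AES-128-CBC
 * encrypt descriptor is assembled from the bits above roughly like this,
 * mirroring what mxs_dcp_run_aes() does below:
 *
 *	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 *			 MXS_DCP_CONTROL0_INTERRUPT |
 *			 MXS_DCP_CONTROL0_ENABLE_CIPHER |
 *			 MXS_DCP_CONTROL0_PAYLOAD_KEY |
 *			 MXS_DCP_CONTROL0_CIPHER_ENCRYPT |
 *			 MXS_DCP_CONTROL0_CIPHER_INIT;
 *	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128 |
 *			 MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 *	desc->payload	  = key_phys;	 (DMA address of key + IV)
 *	desc->source	  = src_phys;
 *	desc->destination = dst_phys;
 *	desc->size	  = length in bytes, a multiple of AES_BLOCK_SIZE;
 */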

static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		dma_err = -ETIMEDOUT;
		goto out_unmap;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		dma_err = -EINVAL;
		goto out_unmap;
	}

	dma_err = 0;

out_unmap:
	/* Always release the descriptor mapping, even on error. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return dma_err;
}
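
/*
 * The hardware handshake above is: clear the channel status, point
 * CH_N_CMDPTR at the physical address of a single descriptor, then bump
 * CH_N_SEMA so the channel fetches and executes it. Completion is
 * signalled through the DCP interrupt (see mxs_dcp_irq() below), which
 * completes the per-channel completion this function waits on.
 */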

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys, src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, key_phys);
	if (ret)
		return ret;

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);

	return ret;
}
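
/*
 * mxs_dcp_run_aes() expects the caller to have staged everything in the
 * coherent block: the key (and, for CBC, the IV right behind it) in
 * aes_key, and actx->fill bytes of input in aes_in_buf. The CIPHER_INIT
 * bit is only set for the first CBC chunk, so that subsequent chunks
 * chain off the context the hardware keeps for the channel.
 */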

static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware; store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}
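
/*
 * A hash request is processed as a series of descriptors on the same
 * channel: HASH_INIT on the first chunk, intermediate chunks with neither
 * INIT nor TERM, and HASH_TERM on the final chunk, whose payload points at
 * sha_out_buf so the engine writes the digest there. The empty-message
 * case is special-cased above: the precomputed null hashes are copied
 * directly rather than relying on the engine's output.
 */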

static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* For some reason the result is flipped */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}
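
/*
 * The byte reversal above matches the order in which the DCP writes the
 * digest (and the byte-flipped null hash constants at the top of this
 * file), so callers receive the digest in the conventional byte order.
 */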

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			arq->complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}
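
/*
 * export/import serialize the whole software state (the request context
 * plus the transform context) into a struct dcp_export_state, which is
 * what .statesize advertises below. This is what lets the crypto API
 * suspend a partially hashed stream and resume it later.
 */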

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	},
};
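
/*
 * Usage sketch (not part of this driver): once these algorithms are
 * registered, a kernel user reaches them through the generic skcipher
 * API, roughly along the lines of:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * With cra_priority 400, "cbc-aes-dcp" is preferred over the generic C
 * implementation whenever the DCP hardware is present.
 */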

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_alignmask		= 63,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};
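
/*
 * Usage sketch (not part of this driver): the hashes are reached through
 * the ahash API, roughly along the lines of:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_crypt(req, src_sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * which ends up in dcp_sha_digest() above when "sha256-dcp" wins the
 * priority selection.
 */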

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk)) {
		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
			return PTR_ERR(sdcp->dcp_clk);
		sdcp->dcp_clk = NULL;
	}
	ret = clk_prepare_enable(sdcp->dcp_clk);
	if (ret)
		return ret;

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		goto err_disable_unprepare_clk;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);

	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));

	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_disable_unprepare_clk;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_disable_unprepare_clk:
	clk_disable_unprepare(sdcp->dcp_clk);

	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	clk_disable_unprepare(sdcp->dcp_clk);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
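
/*
 * Example device tree node (illustrative only; the unit address, register
 * range and interrupt numbers below are placeholders, see the SoC dtsi and
 * the fsl,imx23-dcp / fsl,imx28-dcp binding for the real values):
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */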

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");