img-hash.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET 0
#define CR_RESET_SET 1
#define CR_RESET_UNSET 0

#define CR_MESSAGE_LENGTH_H 0x4
#define CR_MESSAGE_LENGTH_L 0x8

#define CR_CONTROL 0xc
#define CR_CONTROL_BYTE_ORDER_3210 0
#define CR_CONTROL_BYTE_ORDER_0123 1
#define CR_CONTROL_BYTE_ORDER_2310 2
#define CR_CONTROL_BYTE_ORDER_1032 3
#define CR_CONTROL_BYTE_ORDER_SHIFT 8
#define CR_CONTROL_ALGO_MD5 0
#define CR_CONTROL_ALGO_SHA1 1
#define CR_CONTROL_ALGO_SHA224 2
#define CR_CONTROL_ALGO_SHA256 3

#define CR_INTSTAT 0x10
#define CR_INTENAB 0x14
#define CR_INTCLEAR 0x18
#define CR_INT_RESULTS_AVAILABLE BIT(0)
#define CR_INT_NEW_RESULTS_SET BIT(1)
#define CR_INT_RESULT_READ_ERR BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR BIT(3)
#define CR_INT_STATUS BIT(8)

#define CR_RESULT_QUEUE 0x1c
#define CR_RSD0 0x40
#define CR_CORE_REV 0x50
#define CR_CORE_DES1 0x60
#define CR_CORE_DES2 0x70

#define DRIVER_FLAGS_BUSY BIT(0)
#define DRIVER_FLAGS_FINAL BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY BIT(3)
#define DRIVER_FLAGS_INIT BIT(4)
#define DRIVER_FLAGS_CPU BIT(5)
#define DRIVER_FLAGS_DMA_READY BIT(6)
#define DRIVER_FLAGS_ERROR BIT(7)
#define DRIVER_FLAGS_SG BIT(8)
#define DRIVER_FLAGS_SHA1 BIT(18)
#define DRIVER_FLAGS_SHA224 BIT(19)
#define DRIVER_FLAGS_SHA256 BIT(20)
#define DRIVER_FLAGS_MD5 BIT(21)

#define IMG_HASH_QUEUE_LENGTH 20
#define IMG_HASH_DMA_BURST 4
#define IMG_HASH_DMA_THRESHOLD 64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
        struct img_hash_dev *hdev;
        u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
        unsigned long flags;
        size_t digsize;

        dma_addr_t dma_addr;
        size_t dma_ct;

        /* sg root */
        struct scatterlist *sgfirst;
        /* walk state */
        struct scatterlist *sg;
        size_t nents;
        size_t offset;
        unsigned int total;
        size_t sent;

        unsigned long op;

        size_t bufcnt;
        struct ahash_request fallback_req;

        /* Zero length buffer must remain last member of struct */
        u8 buffer[] __aligned(sizeof(u32));
};

struct img_hash_ctx {
        struct img_hash_dev *hdev;
        unsigned long flags;
        struct crypto_ahash *fallback;
};

struct img_hash_dev {
        struct list_head list;
        struct device *dev;
        struct clk *hash_clk;
        struct clk *sys_clk;
        void __iomem *io_base;

        phys_addr_t bus_addr;
        void __iomem *cpu_addr;

        spinlock_t lock;
        int err;
        struct tasklet_struct done_task;
        struct tasklet_struct dma_task;

        unsigned long flags;
        struct crypto_queue queue;
        struct ahash_request *req;

        struct dma_chan *dma_lch;
};

struct img_hash_drv {
        struct list_head dev_list;
        spinlock_t lock;
};

static struct img_hash_drv img_hash = {
        .dev_list = LIST_HEAD_INIT(img_hash.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
        return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
                                  u32 offset, u32 value)
{
        writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
        return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}
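
/*
 * Program the byte order and selected algorithm into CR_CONTROL and start
 * the hash engine for the current request.
 */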
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

        if (ctx->flags & DRIVER_FLAGS_MD5)
                cr |= CR_CONTROL_ALGO_MD5;
        else if (ctx->flags & DRIVER_FLAGS_SHA1)
                cr |= CR_CONTROL_ALGO_SHA1;
        else if (ctx->flags & DRIVER_FLAGS_SHA224)
                cr |= CR_CONTROL_ALGO_SHA224;
        else if (ctx->flags & DRIVER_FLAGS_SHA256)
                cr |= CR_CONTROL_ALGO_SHA256;

        dev_dbg(hdev->dev, "Starting hash process\n");
        img_hash_write(hdev, CR_CONTROL, cr);

        /*
         * The hardware block requires two cycles between writing the control
         * register and writing the first word of data in non-DMA mode. To
         * ensure the first data write is not grouped in a burst with the
         * control register write, a read is issued to 'flush' the bus.
         */
        if (!dma)
                img_hash_read(hdev, CR_CONTROL);
}
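
/*
 * Feed a buffer to the hash engine with the CPU, one 32-bit word at a time.
 * Completion is signalled by the CR_INT_NEW_RESULTS_SET interrupt, so this
 * always returns -EINPROGRESS.
 */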
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
                             size_t length, int final)
{
        u32 count, len32;
        const u32 *buffer = (const u32 *)buf;

        dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

        if (final)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        len32 = DIV_ROUND_UP(length, sizeof(u32));

        for (count = 0; count < len32; count++)
                writel_relaxed(buffer[count], hdev->cpu_addr);

        return -EINPROGRESS;
}
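
/*
 * DMA completion callback: flush any buffered trailing bytes with CPU writes,
 * then schedule the dma tasklet to send the next chunk of the scatterlist.
 */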
static void img_hash_dma_callback(void *data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->bufcnt) {
                img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
                ctx->bufcnt = 0;
        }
        if (ctx->sg)
                tasklet_schedule(&hdev->dma_task);
}
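
/* Map a single scatterlist segment and submit it to the DMA engine. */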
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
        struct dma_async_tx_descriptor *desc;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
        if (ctx->dma_ct == 0) {
                dev_err(hdev->dev, "Invalid DMA sg\n");
                hdev->err = -EINVAL;
                return -EINVAL;
        }

        desc = dmaengine_prep_slave_sg(hdev->dma_lch,
                                       sg,
                                       ctx->dma_ct,
                                       DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(hdev->dev, "Null DMA descriptor\n");
                hdev->err = -EINVAL;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
                return -EINVAL;
        }
        desc->callback = img_hash_dma_callback;
        desc->callback_param = hdev;
        dmaengine_submit(desc);
        dma_async_issue_pending(hdev->dma_lch);

        return 0;
}
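
/*
 * PIO path: copy the whole request into the driver buffer and push it to the
 * hardware with CPU writes. Used for requests below IMG_HASH_DMA_THRESHOLD.
 */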
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
                                        ctx->buffer, hdev->req->nbytes);

        ctx->total = hdev->req->nbytes;
        ctx->bufcnt = 0;

        hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

        img_hash_start(hdev, false);

        return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

        if (!req->result)
                return -EINVAL;

        memcpy(req->result, ctx->digest, ctx->digsize);

        return 0;
}
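
/* Read the digest out of the hardware result queue, last word first. */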
static void img_hash_copy_hash(struct ahash_request *req)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
        int i;

        for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
                hash[i] = img_hash_read_result_queue(ctx->hdev);
}
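
/*
 * Complete a request: copy out the digest on success, clear the driver state
 * flags and invoke the crypto API completion callback.
 */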
static void img_hash_finish_req(struct ahash_request *req, int err)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = ctx->hdev;

        if (!err) {
                img_hash_copy_hash(req);
                if (DRIVER_FLAGS_FINAL & hdev->flags)
                        err = img_hash_finish(req);
        } else {
                dev_warn(hdev->dev, "Hash failed with error %d\n", err);
                ctx->flags |= DRIVER_FLAGS_ERROR;
        }

        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
                DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

        if (req->base.complete)
                req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        img_hash_start(hdev, true);

        dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

        if (!ctx->total)
                hdev->flags |= DRIVER_FLAGS_FINAL;

        hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

        tasklet_schedule(&hdev->dma_task);

        return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
        struct dma_slave_config dma_conf;
        int err;

        hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
        if (IS_ERR(hdev->dma_lch)) {
                dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
                return PTR_ERR(hdev->dma_lch);
        }
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->bus_addr;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
        dma_conf.device_fc = false;

        err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
        if (err) {
                dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
                dma_release_channel(hdev->dma_lch);
                return err;
        }

        return 0;
}
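
/*
 * Tasklet that walks the request's scatterlist and issues word-aligned DMA
 * transfers, buffering any unaligned tail bytes so they can be written by
 * the CPU (see the comment below on the missing data valid mask).
 */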
static void img_hash_dma_task(unsigned long d)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)d;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
        u8 *addr;
        size_t nbytes, bleft, wsend, len, tbc;
        struct scatterlist tsg;

        if (!hdev->req || !ctx->sg)
                return;

        addr = sg_virt(ctx->sg);
        nbytes = ctx->sg->length - ctx->offset;

        /*
         * The hash accelerator does not support a data valid mask. This means
         * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
         * padding bytes in the last word written by that dma would erroneously
         * be included in the hash. To avoid this we round down the transfer,
         * and add the excess to the start of the next dma. It does not matter
         * that the final dma may not be a multiple of 4 bytes as the hashing
         * block is programmed to accept the correct number of bytes.
         */

        bleft = nbytes % 4;
        wsend = (nbytes / 4);

        if (wsend) {
                sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
                if (img_hash_xmit_dma(hdev, &tsg)) {
                        dev_err(hdev->dev, "DMA failed, falling back to CPU");
                        ctx->flags |= DRIVER_FLAGS_CPU;
                        hdev->err = 0;
                        img_hash_xmit_cpu(hdev, addr + ctx->offset,
                                          wsend * 4, 0);
                        ctx->sent += wsend * 4;
                        wsend = 0;
                } else {
                        ctx->sent += wsend * 4;
                }
        }

        if (bleft) {
                ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer, bleft, ctx->sent);
                tbc = 0;
                ctx->sg = sg_next(ctx->sg);
                while (ctx->sg && (ctx->bufcnt < 4)) {
                        len = ctx->sg->length;
                        if (likely(len > (4 - ctx->bufcnt)))
                                len = 4 - ctx->bufcnt;
                        tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
                                                 ctx->buffer + ctx->bufcnt, len,
                                                 ctx->sent + ctx->bufcnt);
                        ctx->bufcnt += tbc;
                        if (tbc >= ctx->sg->length) {
                                ctx->sg = sg_next(ctx->sg);
                                tbc = 0;
                        }
                }

                ctx->sent += ctx->bufcnt;
                ctx->offset = tbc;

                if (!wsend)
                        img_hash_dma_callback(hdev);
        } else {
                ctx->offset = 0;
                ctx->sg = sg_next(ctx->sg);
        }
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
        struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

        if (ctx->flags & DRIVER_FLAGS_SG)
                dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

        return 0;
}
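
/* Choose between the DMA and CPU write paths based on the request size. */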
static int img_hash_process_data(struct img_hash_dev *hdev)
{
        struct ahash_request *req = hdev->req;
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        int err = 0;

        ctx->bufcnt = 0;

        if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
                dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
                        req->nbytes);
                err = img_hash_write_via_dma(hdev);
        } else {
                dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
                        req->nbytes);
                err = img_hash_write_via_cpu(hdev);
        }

        return err;
}
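
/*
 * Reset the block, enable the 'new results' interrupt and program the total
 * message length in bits before any data is written.
 */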
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
        unsigned long long nbits;
        u32 u, l;

        img_hash_write(hdev, CR_RESET, CR_RESET_SET);
        img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
        img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

        nbits = (u64)hdev->req->nbytes << 3;
        u = nbits >> 32;
        l = nbits;
        img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
        img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

        if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
                hdev->flags |= DRIVER_FLAGS_INIT;
                hdev->err = 0;
        }
        dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
        return 0;
}

static int img_hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}
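
/*
 * Enqueue a request (if given) and, when the hardware is idle, dequeue the
 * next request and start processing it.
 */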
static int img_hash_handle_queue(struct img_hash_dev *hdev,
                                 struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct img_hash_request_ctx *ctx;
        unsigned long flags;
        int err = 0, res = 0;

        spin_lock_irqsave(&hdev->lock, flags);

        if (req)
                res = ahash_enqueue_request(&hdev->queue, req);

        if (DRIVER_FLAGS_BUSY & hdev->flags) {
                spin_unlock_irqrestore(&hdev->lock, flags);
                return res;
        }

        backlog = crypto_get_backlog(&hdev->queue);
        async_req = crypto_dequeue_request(&hdev->queue);
        if (async_req)
                hdev->flags |= DRIVER_FLAGS_BUSY;

        spin_unlock_irqrestore(&hdev->lock, flags);

        if (!async_req)
                return res;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        hdev->req = req;

        ctx = ahash_request_ctx(req);

        dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
                 ctx->op, req->nbytes);

        err = img_hash_hw_init(hdev);

        if (!err)
                err = img_hash_process_data(hdev);

        if (err != -EINPROGRESS) {
                /* done_task will not finish so do it here */
                img_hash_finish_req(req, err);
        }
        return res;
}
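
/*
 * update/final/finup/import/export are delegated to the software fallback;
 * the hardware is only used for whole-message digest operations.
 */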
static int img_hash_update(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_import(struct ahash_request *req, const void *in)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int img_hash_export(struct ahash_request *req, void *out)
{
        struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
        rctx->fallback_req.base.flags = req->base.flags
                & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}
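
/*
 * One-shot digest on the hardware: pick a device, select the algorithm from
 * the digest size, set up the scatterlist walk state and queue the request.
 */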
static int img_hash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
        struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
        struct img_hash_dev *hdev = NULL;
        struct img_hash_dev *tmp;
        int err;

        spin_lock(&img_hash.lock);
        if (!tctx->hdev) {
                list_for_each_entry(tmp, &img_hash.dev_list, list) {
                        hdev = tmp;
                        break;
                }
                tctx->hdev = hdev;
        } else {
                hdev = tctx->hdev;
        }

        spin_unlock(&img_hash.lock);
        ctx->hdev = hdev;
        ctx->flags = 0;
        ctx->digsize = crypto_ahash_digestsize(tfm);

        switch (ctx->digsize) {
        case SHA1_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA256;
                break;
        case SHA224_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_SHA224;
                break;
        case MD5_DIGEST_SIZE:
                ctx->flags |= DRIVER_FLAGS_MD5;
                break;
        default:
                return -EINVAL;
        }

        ctx->bufcnt = 0;
        ctx->offset = 0;
        ctx->sent = 0;
        ctx->total = req->nbytes;
        ctx->sg = req->src;
        ctx->sgfirst = req->src;
        ctx->nents = sg_nents(ctx->sg);

        err = img_hash_handle_queue(tctx->hdev, req);

        return err;
}
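
/*
 * Allocate the software fallback transform and size the request context to
 * hold the fallback's request plus the driver's bounce buffer.
 */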
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
        struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = -ENOMEM;

        ctx->fallback = crypto_alloc_ahash(alg_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("img_hash: Could not load fallback driver.\n");
                err = PTR_ERR(ctx->fallback);
                goto err;
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct img_hash_request_ctx) +
                                 crypto_ahash_reqsize(ctx->fallback) +
                                 IMG_HASH_DMA_THRESHOLD);

        return 0;

err:
        return err;
}

static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "md5-generic");
}

static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha1-generic");
}

static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha224-generic");
}

static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
        return img_hash_cra_init(tfm, "sha256-generic");
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tctx->fallback);
}
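
/*
 * Interrupt handler: acknowledge the interrupt, and on new results mark the
 * output (and DMA) state ready and defer completion to the done tasklet.
 * The remaining status bits only produce diagnostics.
 */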
static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
        struct img_hash_dev *hdev = dev_id;
        u32 reg;

        reg = img_hash_read(hdev, CR_INTSTAT);
        img_hash_write(hdev, CR_INTCLEAR, reg);

        if (reg & CR_INT_NEW_RESULTS_SET) {
                dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
                if (DRIVER_FLAGS_BUSY & hdev->flags) {
                        hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
                        if (!(DRIVER_FLAGS_CPU & hdev->flags))
                                hdev->flags |= DRIVER_FLAGS_DMA_READY;
                        tasklet_schedule(&hdev->done_task);
                } else {
                        dev_warn(hdev->dev,
                                 "HASH interrupt when no active requests.\n");
                }
        } else if (reg & CR_INT_RESULTS_AVAILABLE) {
                dev_warn(hdev->dev,
                         "IRQ triggered before the hash had completed\n");
        } else if (reg & CR_INT_RESULT_READ_ERR) {
                dev_warn(hdev->dev,
                         "Attempt to read from an empty result queue\n");
        } else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
                dev_warn(hdev->dev,
                         "Data written before the hardware was configured\n");
        }
        return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
                        .base = {
                                .cra_name = "md5",
                                .cra_driver_name = "img-md5",
                                .cra_priority = 300,
                                .cra_flags =
                                        CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_md5_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "img-sha1",
                                .cra_priority = 300,
                                .cra_flags =
                                        CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha1_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "img-sha224",
                                .cra_priority = 300,
                                .cra_flags =
                                        CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha224_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .init = img_hash_init,
                .update = img_hash_update,
                .final = img_hash_final,
                .finup = img_hash_finup,
                .export = img_hash_export,
                .import = img_hash_import,
                .digest = img_hash_digest,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "img-sha256",
                                .cra_priority = 300,
                                .cra_flags =
                                        CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct img_hash_ctx),
                                .cra_init = img_hash_cra_sha256_init,
                                .cra_exit = img_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};

static int img_register_algs(struct img_hash_dev *hdev)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
                err = crypto_register_ahash(&img_algs[i]);
                if (err)
                        goto err_reg;
        }
        return 0;

err_reg:
        for (; i--; )
                crypto_unregister_ahash(&img_algs[i]);

        return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(img_algs); i++)
                crypto_unregister_ahash(&img_algs[i]);
        return 0;
}
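
/*
 * Bottom half run after an interrupt (or to restart the queue): tidy up the
 * CPU or DMA transfer state and complete the request, or pull the next
 * request off the queue if the hardware has gone idle.
 */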
static void img_hash_done_task(unsigned long data)
{
        struct img_hash_dev *hdev = (struct img_hash_dev *)data;
        int err = 0;

        if (hdev->err == -EINVAL) {
                err = hdev->err;
                goto finish;
        }

        if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
                img_hash_handle_queue(hdev, NULL);
                return;
        }

        if (DRIVER_FLAGS_CPU & hdev->flags) {
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
                        goto finish;
                }
        } else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
                if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
                        hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
                        img_hash_write_via_dma_stop(hdev);
                        if (hdev->err) {
                                err = hdev->err;
                                goto finish;
                        }
                }
                if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
                        hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
                                DRIVER_FLAGS_OUTPUT_READY);
                        goto finish;
                }
        }
        return;

finish:
        img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
        { .compatible = "img,hash-accelerator" },
        {}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;
        struct device *dev = &pdev->dev;
        struct resource *hash_res;
        int irq;
        int err;

        hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return -ENOMEM;

        spin_lock_init(&hdev->lock);

        hdev->dev = dev;

        platform_set_drvdata(pdev, hdev);

        INIT_LIST_HEAD(&hdev->list);

        tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
        tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

        crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

        /* Register bank */
        hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hdev->io_base)) {
                err = PTR_ERR(hdev->io_base);
                dev_err(dev, "can't ioremap, returned %d\n", err);
                goto res_err;
        }

        /* Write port (DMA or CPU) */
        hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
        if (IS_ERR(hdev->cpu_addr)) {
                dev_err(dev, "can't ioremap write port\n");
                err = PTR_ERR(hdev->cpu_addr);
                goto res_err;
        }
        hdev->bus_addr = hash_res->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                err = irq;
                goto res_err;
        }

        err = devm_request_irq(dev, irq, img_irq_handler, 0,
                               dev_name(dev), hdev);
        if (err) {
                dev_err(dev, "unable to request irq\n");
                goto res_err;
        }
        dev_dbg(dev, "using IRQ channel %d\n", irq);

        hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
        if (IS_ERR(hdev->hash_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->hash_clk);
                goto res_err;
        }

        hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(hdev->sys_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(hdev->sys_clk);
                goto res_err;
        }

        err = clk_prepare_enable(hdev->hash_clk);
        if (err)
                goto res_err;

        err = clk_prepare_enable(hdev->sys_clk);
        if (err)
                goto clk_err;

        err = img_hash_dma_init(hdev);
        if (err)
                goto dma_err;

        dev_dbg(dev, "using %s for DMA transfers\n",
                dma_chan_name(hdev->dma_lch));

        spin_lock(&img_hash.lock);
        list_add_tail(&hdev->list, &img_hash.dev_list);
        spin_unlock(&img_hash.lock);

        err = img_register_algs(hdev);
        if (err)
                goto err_algs;
        dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

        return 0;

err_algs:
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);
        dma_release_channel(hdev->dma_lch);
dma_err:
        clk_disable_unprepare(hdev->sys_clk);
clk_err:
        clk_disable_unprepare(hdev->hash_clk);
res_err:
        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
        struct img_hash_dev *hdev;

        hdev = platform_get_drvdata(pdev);
        spin_lock(&img_hash.lock);
        list_del(&hdev->list);
        spin_unlock(&img_hash.lock);

        img_unregister_algs(hdev);

        tasklet_kill(&hdev->done_task);
        tasklet_kill(&hdev->dma_task);

        dma_release_channel(hdev->dma_lch);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_hash_suspend(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);

        clk_disable_unprepare(hdev->hash_clk);
        clk_disable_unprepare(hdev->sys_clk);

        return 0;
}

static int img_hash_resume(struct device *dev)
{
        struct img_hash_dev *hdev = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(hdev->hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(hdev->sys_clk);
        if (ret) {
                clk_disable_unprepare(hdev->hash_clk);
                return ret;
        }

        return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
};

static struct platform_driver img_hash_driver = {
        .probe = img_hash_probe,
        .remove = img_hash_remove,
        .driver = {
                .name = "img-hash-accelerator",
                .pm = &img_hash_pm_ops,
                .of_match_table = of_match_ptr(img_hash_match),
        }
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");