skcipher.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);
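
/*
 * DMA completion callback: tear down the DMA descriptors and scatterlist
 * mappings, copy the updated counter/IV out of the result dump buffer and
 * report completion of the asynchronous request to the qce core.
 */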
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}
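
/*
 * Prepare one request for the crypto engine: count and map the source and
 * destination scatterlists, append the result dump buffer to the destination
 * table, program the DMA channels and start the engine. qce_skcipher_done()
 * runs when the DMA transfer completes.
 */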
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int dst_nents, src_nents, ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}
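
	/*
	 * Reserve one extra entry in the destination table for the result
	 * dump buffer that gets appended below.
	 */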
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative value */
	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		/* in-place: share the mapped list, minus the result buffer entry */
		rctx->src_sg = rctx->dst_sg;
		src_nents = dst_nents - 1;
	}
	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
			       rctx->dst_sg, dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
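
/*
 * AES setkey: the engine only handles 128- and 256-bit keys (twice that for
 * XTS), so the key is also programmed into the fallback tfm, which covers the
 * remaining key sizes such as AES-192.
 */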
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;

	return ret;
}
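
/*
 * DES and 3DES keys are validated with the crypto API key-check helpers
 * before being stored for the hardware.
 */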
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}
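
/*
 * Decide between the crypto engine and the software fallback. AES requests
 * go to the fallback when the key size is not 128 or 256 bits, when the
 * request is no larger than aes_sw_max_len, or when an AES-XTS request
 * exceeds QCE_SECTOR_SIZE without being a multiple of it (the engine hangs
 * on such lengths).
 */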
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
	 * is not a multiple of it; pass such requests to the fallback
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}
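
/*
 * Request context sizing: the plain init hook (non-AES algorithms) does not
 * need the fallback request embedded at the end of the context, while the
 * AES init hook allocates a fallback skcipher and reserves room for its
 * request as well.
 */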
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
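
/*
 * Static description of every skcipher variant exposed by this driver;
 * qce_skcipher_register() walks this table and registers one algorithm
 * per entry.
 */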
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags = QCE_ALG_AES | QCE_MODE_ECB,
		.name = "ecb(aes)",
		.drv_name = "ecb-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CBC,
		.name = "cbc(aes)",
		.drv_name = "cbc-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_CTR,
		.name = "ctr(aes)",
		.drv_name = "ctr-aes-qce",
		.blocksize = 1,
		.chunksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_AES | QCE_MODE_XTS,
		.name = "xts(aes)",
		.drv_name = "xts-aes-qce",
		.blocksize = AES_BLOCK_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.min_keysize = AES_MIN_KEY_SIZE * 2,
		.max_keysize = AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_ECB,
		.name = "ecb(des)",
		.drv_name = "ecb-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_DES | QCE_MODE_CBC,
		.name = "cbc(des)",
		.drv_name = "cbc-des-qce",
		.blocksize = DES_BLOCK_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_ECB,
		.name = "ecb(des3_ede)",
		.drv_name = "ecb-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = 0,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
	{
		.flags = QCE_ALG_3DES | QCE_MODE_CBC,
		.name = "cbc(des3_ede)",
		.drv_name = "cbc-3des-qce",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
	},
};
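
/*
 * Build a struct skcipher_alg from one table entry and register it with the
 * crypto API. AES algorithms advertise CRYPTO_ALG_NEED_FALLBACK and use the
 * fallback-aware init/exit hooks; DES and 3DES run on the engine only.
 */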
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}
static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}
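
/*
 * Register every entry of skcipher_def; on failure, unregister whatever was
 * already added so the caller sees all-or-nothing behaviour.
 */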
static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}
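
/*
 * Hooks exported to the qce core; the core dispatches queued skcipher
 * requests through async_req_handle().
 */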
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};