// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;
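
/*
 * cryptd keeps one request queue per possible CPU; each per-CPU queue is
 * drained by its own work item running on the cryptd workqueue.
 */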
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        refcount_t refcnt;
        struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        refcount_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        refcount_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);
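
/* Allocate one crypto_queue and one work item for every possible CPU. */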
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}
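
/*
 * Enqueue a request on the current CPU's queue and schedule that CPU's
 * work item.  An extra reference on the transform is taken (once its
 * refcount is live) so it survives until the request completes.
 */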
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out_put_cpu;

        queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

        if (!refcount_read(refcnt))
                goto out_put_cpu;

        refcount_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
         * preempt_disable/enable is used to prevent being preempted by
         * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
         * cryptd_enqueue_request() being accessed from software interrupts.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
                                 u32 *type, u32 *mask)
{
        /*
         * cryptd is allowed to wrap internal algorithms, but in that case the
         * resulting cryptd instance will be marked as internal as well.
         */
        *type = algt->type & CRYPTO_ALG_INTERNAL;
        *mask = algt->mask & CRYPTO_ALG_INTERNAL;

        /* No point in cryptd wrapping an algorithm that's already async. */
        *mask |= CRYPTO_ALG_ASYNC;

        *mask |= crypto_algt_inherited_mask(algt);
}
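
/*
 * Common instance setup: name the wrapper "cryptd(<driver>)" and raise its
 * priority by 50 so it is preferred over the wrapped algorithm.
 */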
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}
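
/* Forward the key to the synchronous child, mirroring the parent's request flags. */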
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_sync_skcipher *child = ctx->child;

        crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(child,
                                       crypto_skcipher_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}
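
/*
 * Workqueue callbacks: run the real encryption or decryption synchronously
 * on the child transform via an on-stack subrequest, then complete the
 * original request.
 */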
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}
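
/* Save the caller's completion, substitute the workqueue callback and queue the request. */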
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = (struct crypto_sync_skcipher *)cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}
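
/*
 * Build and register a cryptd skcipher instance around the algorithm named
 * by the template parameter.
 */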
static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct crypto_attr_type *algt,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_skcipher_free(inst);
        }
        return err;
}
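
/*
 * Allocate the shash child and size the ahash request context to hold the
 * completion pointer plus the child's descriptor state.
 */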
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
        struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

        crypto_drop_shash(&ctx->spawn);
        kfree(inst);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
                                crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;
        alg = crypto_spawn_shash_alg(&ctx->spawn);

        err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
                                        CRYPTO_ALG_OPTIONAL_KEY));
        inst->alg.halg.digestsize = alg->digestsize;
        inst->alg.halg.statesize = alg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(alg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        inst->free = cryptd_hash_free;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_hash_free(inst);
        }
        return err;
}
static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}
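
/*
 * Common AEAD workqueue path: run the child's encrypt or decrypt handler,
 * then complete the original request and drop the pending reference.
 */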
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}
static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
        struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

        crypto_drop_aead(&ctx->aead_spawn);
        kfree(inst);
}
static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
                               crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        inst->free = cryptd_aead_free;

        err = aead_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_aead_free(inst);
        }
        return err;
}

static struct cryptd_queue queue;
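
/* Template entry point: dispatch on the requested algorithm type. */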
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, algt, &queue);
        }

        return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .module = THIS_MODULE,
};
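
/*
 * Exported helpers: allocate a cryptd-wrapped transform by name, take an
 * initial reference, and give callers access to the child transform.
 */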
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
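
/*
 * Module init: create the cryptd workqueue, set up the per-CPU queues and
 * register the "cryptd" template; tear down in reverse order on failure.
 */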
static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");