// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared crypto simd helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2019 Google LLC
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 */

/*
 * Shared crypto SIMD helpers.  These functions dynamically create and register
 * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
 * wrapper ensures that the internal algorithm is only executed in a context
 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
 * If SIMD is already usable, the wrapper directly calls the internal
 * algorithm.  Otherwise it defers execution to a workqueue via cryptd.
 *
 * This is an alternative to the internal algorithm implementing a fallback for
 * the !may_use_simd() case itself.
 *
 * Note that the wrapper algorithm is asynchronous, i.e. it has the
 * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
 * explicitly allocate a synchronous algorithm.
 */
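
/*
 * Example usage (a minimal sketch; "__cbc-aes-mydrv" stands in for the
 * cra_driver_name of an internal algorithm registered elsewhere and is purely
 * hypothetical):
 *
 *	static struct simd_skcipher_alg *simd_alg;
 *
 *	static int __init mydrv_init(void)
 *	{
 *		simd_alg = simd_skcipher_create("cbc(aes)", "__cbc-aes-mydrv");
 *		return PTR_ERR_OR_ZERO(simd_alg);
 *	}
 *	module_init(mydrv_init);
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		simd_skcipher_free(simd_alg);
 *	}
 *	module_exit(mydrv_exit);
 */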

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>

/* skcipher support */

struct simd_skcipher_alg {
	const char *ialg_name;
	struct skcipher_alg alg;
};

struct simd_skcipher_ctx {
	struct cryptd_skcipher *cryptd_tfm;
};
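
/*
 * Key material and request flags are simply forwarded to the cryptd tfm,
 * which in turn passes them down to the internal algorithm.
 */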
static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, key_len);
}
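
/*
 * Dispatch rule shared by the encrypt and decrypt paths below: hand the
 * request directly to the internal (child) algorithm when SIMD is usable;
 * otherwise route it through cryptd.  The in_atomic() && queued check keeps
 * new requests from overtaking ones already queued to cryptd, preserving
 * submission order.
 */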
static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}

static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}

static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	cryptd_free_skcipher(ctx->cryptd_tfm);
}

static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher *cryptd_tfm;
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	unsigned reqsize;

	alg = crypto_skcipher_alg(tfm);
	salg = container_of(alg, struct simd_skcipher_alg, alg);

	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
					   CRYPTO_ALG_INTERNAL,
					   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct skcipher_request);

	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}
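
/*
 * simd_skcipher_create_compat() - create and register a SIMD wrapper
 * @algname: cra_name for the wrapper
 * @drvname: cra_driver_name for the wrapper
 * @basename: cra_driver_name of the internal algorithm to wrap
 *
 * The internal algorithm is looked up, its properties are copied into a newly
 * allocated wrapper algorithm, and the wrapper is registered.
 */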
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
						      const char *drvname,
						      const char *basename)
{
	struct simd_skcipher_alg *salg;
	struct crypto_skcipher *tfm;
	struct skcipher_alg *ialg;
	struct skcipher_alg *alg;
	int err;

	tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_skcipher_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
		(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);

	alg->ivsize = ialg->ivsize;
	alg->chunksize = ialg->chunksize;
	alg->min_keysize = ialg->min_keysize;
	alg->max_keysize = ialg->max_keysize;

	alg->init = simd_skcipher_init;
	alg->exit = simd_skcipher_exit;
	alg->setkey = simd_skcipher_setkey;
	alg->encrypt = simd_skcipher_encrypt;
	alg->decrypt = simd_skcipher_decrypt;

	err = crypto_register_skcipher(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_skcipher(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);

struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
					       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);

void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
	crypto_unregister_skcipher(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
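
/*
 * Bulk helper: register the internal algorithms themselves, then create a
 * SIMD wrapper for each.  The internal algorithms must have both cra_name and
 * cra_driver_name prefixed with "__" (e.g. a hypothetical "__cbc(aes)" /
 * "__cbc-aes-mydrv" pair); the wrapper drops the prefix for its own names and
 * uses the full driver name to locate the internal algorithm.
 */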
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
				   struct simd_skcipher_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;

	err = crypto_register_skciphers(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_skciphers(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);

void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
			       struct simd_skcipher_alg **simd_algs)
{
	int i;

	crypto_unregister_skciphers(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_skcipher_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);

/* AEAD support */

struct simd_aead_alg {
	const char *ialg_name;
	struct aead_alg alg;
};

struct simd_aead_ctx {
	struct cryptd_aead *cryptd_tfm;
};

static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int key_len)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
				     CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(child, key, key_len);
}

static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_encrypt(subreq);
}

static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}

static void simd_aead_exit(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cryptd_free_aead(ctx->cryptd_tfm);
}

static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}
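
/*
 * AEAD counterpart of simd_skcipher_create_compat(): look up the internal
 * AEAD algorithm by @basename, copy its properties into a newly allocated
 * wrapper, and register the wrapper under @algname / @drvname.
 */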
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
		(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;
	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);

struct simd_aead_alg *simd_aead_create(const char *algname,
				       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);

void simd_aead_free(struct simd_aead_alg *salg)
{
	crypto_unregister_aead(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);
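
/*
 * AEAD counterpart of simd_register_skciphers_compat(); the same "__" naming
 * convention for the internal algorithms applies.
 */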
int simd_register_aeads_compat(struct aead_alg *algs, int count,
			       struct simd_aead_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_aead_alg *simd;

	err = crypto_register_aeads(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_aead_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_aeads(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);

void simd_unregister_aeads(struct aead_alg *algs, int count,
			   struct simd_aead_alg **simd_algs)
{
	int i;

	crypto_unregister_aeads(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_aead_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);

MODULE_LICENSE("GPL");