ahash.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */
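
/*
 * For orientation, a minimal sketch of how a caller typically drives this
 * API (illustrative only, not part of this file; error handling is omitted
 * and the scatterlist "sg", the "digest" buffer and "nbytes" are assumed to
 * be set up by the caller):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
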
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}
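
/*
 * Map the current page of the walk and return how many bytes may be hashed
 * from it in this step.  When the starting offset does not satisfy the
 * algorithm's alignment mask, the step is shortened so that the next one
 * begins on an aligned boundary.
 */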
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}
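
/*
 * Finish the current walk step.  If the previous step ended on an unaligned
 * offset, continue within the same mapped page from the next aligned offset.
 * Otherwise unmap the page, possibly yield, and either advance to the next
 * page of the current scatterlist entry or move on to the next entry.
 * Returns the number of bytes available for the next step, 0 when the walk
 * is complete, or a negative error code.
 */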
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));

		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
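
/*
 * Drivers may require the key to respect the algorithm's alignment mask.
 * When the caller's key buffer is unaligned, copy it into a freshly
 * allocated, suitably aligned bounce buffer, run setkey on that copy and
 * wipe the buffer afterwards with kfree_sensitive().
 */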
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
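
/*
 * Stash the caller's result pointer, completion callback and flags in a
 * freshly allocated ahash_request_priv and redirect the request at an
 * aligned bounce buffer.  ahash_restore_req() undoes this.  See the detailed
 * layout description inside the function.
 */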
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result   = ORIGINAL(result)
	 *		.complete = ORIGINAL(base.complete)
	 *		.data     = ORIGINAL(base.data)
	 *	}
	 * }
	 */
	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */
	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}
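
/*
 * Undo ahash_save_req(): on success copy the digest from the bounce buffer
 * back into the caller's result buffer, reinstate the original callback,
 * data and flags, and free the private state.
 */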
static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}
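
/*
 * Dispatch an ahash operation.  If the caller's result buffer already
 * satisfies the algorithm's alignment mask the operation runs directly;
 * otherwise it goes through the save/restore bounce-buffer path above.
 */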
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
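
/*
 * Like final/finup above, but refuse with -ENOKEY when the algorithm
 * requires a key that has not been set (CRYPTO_TFM_NEED_KEY).
 */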
int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}
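
/*
 * Default finup implementation for drivers that only provide update and
 * final: run update first, then chain into final via the done1/finish1/done2
 * helpers above, handling both synchronous and asynchronous completion.
 */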
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}
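
/*
 * Per-tfm initialisation: wire up the operation pointers from the ahash_alg.
 * Algorithms that are really shash implementations are handed off to the
 * async shash wrapper instead, and drivers without a finup callback get the
 * ahash_def_finup() fallback.
 */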
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
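
/*
 * Common registration checks: reject a digest size above HASH_MAX_DIGESTSIZE
 * or a state size that is zero or above HASH_MAX_STATESIZE, then mark the
 * algorithm as an ahash type before it is registered.
 */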
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");