// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also the ε-∆U hash function of
 * HBSH is formally defined to take two inputs (tweak, message) which makes it
 * difficult to wrap with the crypto_shash API.  Rather, some details need to
 * be handled here.  Nevertheless, if needed in the future, support for other
 * ε-∆U hash functions could be added here.
 */

#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

/*
 * Size of the right-hand part of the input data, in bytes; this is also the
 * block cipher's block size and the size of the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
 * the best performance.  But longer tweaks are useful for fscrypt, to avoid
 * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
 */
#define TWEAK_SIZE		32

struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn;
	struct crypto_cipher_spawn blockcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	struct poly1305_core_key header_hash_key;
};

struct adiantum_request_ctx {

	/*
	 * Buffer for right-hand part of data, i.e.
	 *
	 *    P_L => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_L when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};

/*
 * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
 * hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S) */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/* Derive the subkeys */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
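	/*
	 * data was zero-initialized by kzalloc(), so setting only iv[0] = 1
	 * below gives the nonce 1||0^191 (and a stream position of 0),
	 * matching the derivation formula above.
	 */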
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						  CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;

	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H) */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
	kfree_sensitive(data);
	return err;
}

/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x + y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
			   (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x - y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
			   (x - y > x));
}
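
/*
 * Note on the two helpers above: the low 64-bit limbs are combined first, and
 * the expressions (x + y < x) and (x - y > x) detect 64-bit wraparound, which
 * supplies the carry into (or the borrow from) the high limb.
 */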

/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting
 * value is reused in both the first and second hash steps.  Specifically,
 * it's added to the result of an independently keyed ε-∆U hash function (for
 * equal length inputs only) taken over the left-hand part (the "bulk") of the
 * message, to give the overall Adiantum hash of the (tweak, left-hand part)
 * pair.
 */
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}

/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
				 struct scatterlist *sgl, le128 *digest)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct shash_desc *hash_desc = &rctx->u.hash_desc;
	struct sg_mapping_iter miter;
	unsigned int i, n;
	int err;

	hash_desc->tfm = tctx->hash;

	err = crypto_shash_init(hash_desc);
	if (err)
		return err;

	sg_miter_start(&miter, sgl, sg_nents(sgl),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	for (i = 0; i < bulk_len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, bulk_len - i);
		err = crypto_shash_update(hash_desc, miter.addr, n);
		if (err)
			break;
	}
	sg_miter_stop(&miter);
	if (err)
		return err;
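	/* The NHPoly1305 digest is 16 bytes, so it fills *digest exactly. */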
	return crypto_shash_final(hash_desc, (u8 *)digest);
}

/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	le128 digest;
	int err;

	/* If decrypting, decrypt C_M with the block cipher to get P_M */
	if (!rctx->enc)
		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/*
	 * Second hash step
	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
	err = adiantum_hash_message(req, req->dst, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
	return 0;
}

static void adiantum_streamcipher_done(struct crypto_async_request *areq,
				       int err)
{
	struct skcipher_request *req = areq->data;

	if (!err)
		err = adiantum_finish(req);

	skcipher_request_complete(req, err);
}

static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	unsigned int stream_len;
	le128 digest;
	int err;

	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	err = adiantum_hash_message(req, req->src, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;
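	/*
	 * The resulting IV is thus the nonce C_M || 1 || 0^63 followed by a
	 * stream position of 0, using the same notation as the key
	 * derivation formula above.
	 */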

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But
	 * ChaCha implementations tend to be most efficient when passed a
	 * whole number of 64-byte ChaCha blocks, or sometimes even a multiple
	 * of 256 bytes.  And here it doesn't matter whether the last 16 bytes
	 * are written to, as the second hash step will overwrite them.  Thus,
	 * round the XChaCha length up to the next 64-byte boundary if
	 * possible.
	 */
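	/*
	 * E.g., for a 4096-byte sector, bulk_len is 4080, which rounds up to
	 * 4096 <= req->cryptlen, so the full sector is fed to XChaCha.
	 */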
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
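	/*
	 * If the stream cipher completes synchronously (returns 0), finish
	 * the request now; otherwise the return value (e.g. -EINPROGRESS)
	 * propagates, and adiantum_finish() runs from
	 * adiantum_streamcipher_done() upon completion.
	 */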
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
	       adiantum_finish(req);
}

static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	unsigned int subreq_size;
	int err;

	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
	if (IS_ERR(streamcipher))
		return PTR_ERR(streamcipher);

	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
	if (IS_ERR(blockcipher)) {
		err = PTR_ERR(blockcipher);
		goto err_free_streamcipher;
	}

	hash = crypto_spawn_shash(&ictx->hash_spawn);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		goto err_free_blockcipher;
	}

	tctx->streamcipher = streamcipher;
	tctx->blockcipher = blockcipher;
	tctx->hash = hash;

	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
		     sizeof(struct adiantum_request_ctx));
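	/*
	 * The hash and stream cipher sub-requests are never in flight at the
	 * same time, so a single allocation sized for the larger member of
	 * the union suffices.
	 */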
	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
				       u.hash_desc) +
			  crypto_shash_descsize(hash),
			  sizeof_field(struct adiantum_request_ctx,
				       u.streamcipher_req) +
			  crypto_skcipher_reqsize(streamcipher));

	crypto_skcipher_set_reqsize(tfm,
				    offsetof(struct adiantum_request_ctx, u) +
				    subreq_size);
	return 0;

err_free_blockcipher:
	crypto_free_cipher(blockcipher);
err_free_streamcipher:
	crypto_free_skcipher(streamcipher);
	return err;
}

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->streamcipher);
	crypto_free_cipher(tctx->blockcipher);
	crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->streamcipher_spawn);
	crypto_drop_cipher(&ictx->blockcipher_spawn);
	crypto_drop_shash(&ictx->hash_spawn);
	kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	return true;
}

static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
				       hash_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.
	 * Thus, weigh the block cipher's ->cra_priority less.
	 */
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
	inst->alg.ivsize = TWEAK_SIZE;

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}

/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
	.name = "adiantum",
	.create = adiantum_create,
	.module = THIS_MODULE,
};

static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}

subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);