aes-ce-ccm-glue.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

#include "aes-ce-setkey.h"

static int num_rounds(struct crypto_aes_ctx *ctx)
{
	/*
	 * # of rounds specified by AES:
	 * 128 bit key		10 rounds
	 * 192 bit key		12 rounds
	 * 256 bit key		14 rounds
	 * => n byte key	=> 6 + (n/4) rounds
	 */
	return 6 + ctx->key_length / 4;
}
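
/*
 * Low-level CCM transform routines implemented with the ARMv8 Crypto
 * Extensions in the accompanying assembly file; callers must bracket
 * them with kernel_neon_begin()/kernel_neon_end().
 */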
asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);

asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);

asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);
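
/* expand the AES key schedule using the Crypto Extensions based key expansion */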
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

	return ce_aes_expandkey(ctx, in_key, key_len);
}
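
/*
 * CCM allows even tag sizes from 4 to 16 bytes; the upper bound is
 * enforced by .maxauthsize, so only oddness and the lower bound are
 * checked here.
 */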
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if ((authsize & 1) || authsize < 4)
		return -EINVAL;
	return 0;
}
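
/*
 * Build B0, the first block fed into the CBC-MAC: flags byte, nonce and
 * the message length encoded in the trailing L bytes. Also zero the
 * trailing L counter bytes of req->iv so it can serve as the initial
 * counter block.
 */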
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}
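
/*
 * Feed 'abytes' bytes of authenticated data into the CBC-MAC. The Crypto
 * Extensions path is used when the NEON unit is usable; otherwise a scalar
 * fallback is used, with *macp tracking the fill level of a partial block.
 */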
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
			   u32 abytes, u32 *macp)
{
	if (crypto_simd_usable()) {
		kernel_neon_begin();
		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
				     num_rounds(key));
		kernel_neon_end();
	} else {
		if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
			int added = min(abytes, AES_BLOCK_SIZE - *macp);

			crypto_xor(&mac[*macp], in, added);

			*macp += added;
			in += added;
			abytes -= added;
		}

		while (abytes >= AES_BLOCK_SIZE) {
			aes_encrypt(key, mac, mac);
			crypto_xor(mac, in, AES_BLOCK_SIZE);

			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}

		if (abytes > 0) {
			aes_encrypt(key, mac, mac);
			crypto_xor(mac, in, abytes);
			*macp = abytes;
		}
	}
}
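
/*
 * MAC the associated data: prepend the length encoding mandated by the CCM
 * spec, then walk the source scatterlist and fold it in block by block.
 */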
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
	scatterwalk_start(&walk, req->src);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ccm_update_mac(ctx, mac, p, n, &macp);
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}
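
/*
 * Scalar fallback: CTR-mode encryption/decryption interleaved with the
 * CBC-MAC update, used when crypto_simd_usable() returns false and the
 * NEON-accelerated routines cannot be called.
 */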
static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			      struct crypto_aes_ctx *ctx, bool enc)
{
	u8 buf[AES_BLOCK_SIZE];
	int err = 0;

	while (walk->nbytes) {
		int blocks = walk->nbytes / AES_BLOCK_SIZE;
		u32 tail = walk->nbytes % AES_BLOCK_SIZE;
		u8 *dst = walk->dst.virt.addr;
		u8 *src = walk->src.virt.addr;
		u32 nbytes = walk->nbytes;

		if (nbytes == walk->total && tail > 0) {
			blocks++;
			tail = 0;
		}

		do {
			u32 bsize = AES_BLOCK_SIZE;

			if (nbytes < AES_BLOCK_SIZE)
				bsize = nbytes;

			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			aes_encrypt(ctx, buf, walk->iv);
			aes_encrypt(ctx, mac, mac);
			if (enc)
				crypto_xor(mac, src, bsize);
			crypto_xor_cpy(dst, src, buf, bsize);
			if (!enc)
				crypto_xor(mac, dst, bsize);
			dst += bsize;
			src += bsize;
			nbytes -= bsize;
		} while (--blocks);

		err = skcipher_walk_done(walk, tail);
	}

	if (!err) {
		aes_encrypt(ctx, buf, iv0);
		aes_encrypt(ctx, mac, mac);
		crypto_xor(mac, buf, AES_BLOCK_SIZE);
	}
	return err;
}
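
/*
 * Encrypt and authenticate: MAC the associated data, CTR-encrypt the
 * plaintext while folding it into the CBC-MAC, then encrypt the MAC with
 * the initial counter block and append it to the destination as the tag.
 */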
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_encrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
	}
	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
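
/*
 * Decrypt and verify: same flow as encryption, except the MAC is computed
 * over the decrypted data and compared in constant time against the tag
 * stored at the end of the source.
 */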
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct skcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	err = skcipher_walk_aead_decrypt(&walk, req, false);

	if (crypto_simd_usable()) {
		while (walk.nbytes) {
			u32 tail = walk.nbytes % AES_BLOCK_SIZE;

			if (walk.nbytes == walk.total)
				tail = 0;

			kernel_neon_begin();
			ce_aes_ccm_decrypt(walk.dst.virt.addr,
					   walk.src.virt.addr,
					   walk.nbytes - tail, ctx->key_enc,
					   num_rounds(ctx), mac, walk.iv);
			kernel_neon_end();

			err = skcipher_walk_done(&walk, tail);
		}
		if (!err) {
			kernel_neon_begin();
			ce_aes_ccm_final(mac, buf, ctx->key_enc,
					 num_rounds(ctx));
			kernel_neon_end();
		}
	} else {
		err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
	}
	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}
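
/*
 * Synchronous ccm(aes) implementation; the scalar fallback above keeps it
 * usable even when the NEON unit cannot be used.
 */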
static struct aead_alg ccm_aes_alg = {
	.base = {
		.cra_name		= "ccm(aes)",
		.cra_driver_name	= "ccm-aes-ce",
		.cra_priority		= 300,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.setkey		= ccm_setkey,
	.setauthsize	= ccm_setauthsize,
	.encrypt	= ccm_encrypt,
	.decrypt	= ccm_decrypt,
};
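
/* register only when the CPU advertises the AES instructions */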
static int __init aes_mod_init(void)
{
	if (!cpu_have_named_feature(AES))
		return -ENODEV;
	return crypto_register_aead(&ccm_aes_alg);
}

static void __exit aes_mod_exit(void)
{
	crypto_unregister_aead(&ccm_aes_alg);
}

module_init(aes_mod_init);
module_exit(aes_mod_exit);

MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");