ctr.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CTR: Counter mode
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
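
/*
 * Per-transform context for the rfc3686 template: the handle of the wrapped
 * ctr(cipher) transform plus the 4-byte nonce carried at the end of the key.
 */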
struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
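
/*
 * Per-request context: the full 16-byte counter block built from
 * nonce || IV || counter, followed by the sub-request that is forwarded to
 * the underlying ctr(cipher) transform.
 */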
struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
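
/*
 * Handle the final, possibly partial block: encrypt the counter block once
 * into an aligned keystream buffer and XOR it into the remaining bytes.
 */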
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, bsize);
}
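
/*
 * Process full blocks when source and destination differ: the keystream is
 * generated directly into the destination buffer and then XORed with the
 * source.  Returns the number of bytes left over (less than one block).
 */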
static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
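
/*
 * Process full blocks when the walk is in-place: the keystream goes into a
 * stack buffer aligned for the cipher and is XORed into the data in place.
 */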
static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
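
/*
 * Top-level CTR handler.  Encryption and decryption are identical: walk the
 * request, handle full blocks in or out of place, then handle any trailing
 * partial block with a single keystream block.
 */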
static int crypto_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	const unsigned int bsize = crypto_cipher_blocksize(cipher);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, cipher);

		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, cipher);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
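
/*
 * Instantiate "ctr(cipher)" around a single block cipher.  The wrapped
 * cipher's block size must be a multiple of 4 bytes so that crypto_inc()
 * can operate on aligned 32-bit words.
 */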
static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_free_inst;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
		goto out_free_inst;

	/* CTR mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_ctr_crypt;
	inst->alg.decrypt = crypto_ctr_crypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}
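
/*
 * rfc3686 setkey: the last 4 bytes of the supplied key are the per-key
 * nonce; the remainder is handed to the underlying ctr(cipher) transform.
 */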
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	/* the nonce is stored in bytes at end of key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}
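
/*
 * Build the RFC 3686 counter block (nonce || IV || counter, with the
 * counter starting at 1) in the request context and forward the operation
 * to the wrapped ctr(cipher) transform.
 */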
static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	unsigned long align = crypto_skcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
	struct skcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, iv);

	return crypto_skcipher_encrypt(subreq);
}
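
/*
 * Allocate the child ctr(cipher) transform and size the request context so
 * that the aligned counter block and the child's sub-request fit behind
 * each request.
 */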
static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;
	unsigned long align;
	unsigned int reqsize;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	align = crypto_skcipher_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
		  crypto_skcipher_reqsize(cipher);
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(spawn);
	kfree(inst);
}
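
/*
 * Instantiate "rfc3686(ctr(cipher))": the wrapped algorithm must be a
 * stream cipher (block size 1) with a 16-byte IV, and the advertised key
 * sizes grow by the 4-byte nonce appended to the key.
 */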
static int crypto_rfc3686_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
		goto err_free_inst;

	/* Not a stream cipher? */
	if (alg->base.cra_blocksize != 1)
		goto err_free_inst;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = 1;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;

	inst->alg.setkey = crypto_rfc3686_setkey;
	inst->alg.encrypt = crypto_rfc3686_crypt;
	inst->alg.decrypt = crypto_rfc3686_crypt;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.init = crypto_rfc3686_init_tfm;
	inst->alg.exit = crypto_rfc3686_exit_tfm;

	inst->free = crypto_rfc3686_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		crypto_rfc3686_free(inst);
	}
	return err;
}

static struct crypto_template crypto_ctr_tmpls[] = {
	{
		.name = "ctr",
		.create = crypto_ctr_create,
		.module = THIS_MODULE,
	}, {
		.name = "rfc3686",
		.create = crypto_rfc3686_create,
		.module = THIS_MODULE,
	},
};

static int __init crypto_ctr_module_init(void)
{
	return crypto_register_templates(crypto_ctr_tmpls,
					 ARRAY_SIZE(crypto_ctr_tmpls));
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_templates(crypto_ctr_tmpls,
				    ARRAY_SIZE(crypto_ctr_tmpls));
}

subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);