cfb.c

// SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode which is layered on to a block
 * encryption scheme. It works very much like a one time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext. The
 * pad is XOR'd into the plain text to get the final ciphertext.
 *
 * The scheme of CFB is best described by wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
 */
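
/*
 * Illustrative usage sketch (not part of this file's logic): a caller would
 * typically instantiate this template on top of a block cipher and drive it
 * through the regular skcipher API, e.g.
 *
 *	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * This assumes an "aes" cipher implementation is available; request
 * allocation and error handling are omitted for brevity.
 */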

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
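
/* Block size of the underlying single-block cipher this instance wraps. */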
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
	return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}
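
/*
 * Run the underlying cipher's block encryption once to produce the next
 * keystream block.  Note the argument order: crypto_cipher_encrypt_one()
 * takes (tfm, dst, src).
 */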
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
				   const u8 *src, u8 *dst)
{
	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}

/*
 * Handling of the final, possibly partial, block is the same for
 * encryption and decryption: encrypt the IV once and XOR the resulting
 * keystream into the remaining bytes.
 */
static void crypto_cfb_final(struct skcipher_walk *walk,
			     struct crypto_skcipher *tfm)
{
	const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;
	unsigned int nbytes = walk->nbytes;

	crypto_cfb_encrypt_one(tfm, iv, stream);
	crypto_xor_cpy(dst, stream, src, nbytes);
}
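
/*
 * Encrypt full blocks when source and destination buffers are distinct:
 * each keystream block is written straight to dst, XOR'd with the
 * plaintext, and the resulting ciphertext block becomes the next IV.
 * Returns the number of leftover bytes (< bsize).
 */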
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = dst;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
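
/*
 * Encrypt full blocks in place: the keystream block has to go via a stack
 * buffer because src is overwritten by the XOR; the freshly written
 * ciphertext block then serves as the next IV.
 */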
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		crypto_xor(src, tmp, bsize);
		iv = src;

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
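
/*
 * Top-level encrypt: walk the request, handling full blocks per step and
 * letting crypto_cfb_final() deal with a trailing partial block.
 */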
static int crypto_cfb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			err = crypto_cfb_encrypt_inplace(&walk, tfm);
		else
			err = crypto_cfb_encrypt_segment(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
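
/*
 * Decrypt full blocks when source and destination are distinct.  The only
 * difference from encryption is the feedback: the next IV is the input
 * ciphertext block (iv = src), not the output.
 */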
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	u8 *iv = walk->iv;

	do {
		crypto_cfb_encrypt_one(tfm, iv, dst);
		crypto_xor(dst, src, bsize);
		iv = src;

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	memcpy(walk->iv, iv, bsize);

	return nbytes;
}
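
/*
 * Decrypt full blocks in place: the incoming ciphertext block is saved
 * into the IV buffer before the XOR with the keystream overwrites it.
 */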
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
				      struct crypto_skcipher *tfm)
{
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 * const iv = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE];

	do {
		crypto_cfb_encrypt_one(tfm, iv, tmp);
		memcpy(iv, src, bsize);
		crypto_xor(src, tmp, bsize);
		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
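
/* Dispatch to the in-place or separate-buffer decrypt helper. */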
static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
				     struct crypto_skcipher *tfm)
{
	if (walk->src.virt.addr == walk->dst.virt.addr)
		return crypto_cfb_decrypt_inplace(walk, tfm);
	else
		return crypto_cfb_decrypt_segment(walk, tfm);
}
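
/*
 * Top-level decrypt: same walk structure as encryption, since CFB
 * decryption also only ever uses the block *encryption* primitive.
 */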
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_walk walk;
	const unsigned int bsize = crypto_cfb_bsize(tfm);
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		err = crypto_cfb_decrypt_blocks(&walk, tfm);
		err = skcipher_walk_done(&walk, err);
	}

	if (walk.nbytes) {
		crypto_cfb_final(&walk, tfm);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
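
/*
 * Instantiate "cfb(cipher)" as a simple skcipher template wrapped around a
 * single-block cipher.
 */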
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* CFB mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_cfb_encrypt;
	inst->alg.decrypt = crypto_cfb_decrypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);

	return err;
}

static struct crypto_template crypto_cfb_tmpl = {
	.name = "cfb",
	.create = crypto_cfb_create,
	.module = THIS_MODULE,
};

static int __init crypto_cfb_module_init(void)
{
	return crypto_register_template(&crypto_cfb_tmpl);
}

static void __exit crypto_cfb_module_exit(void)
{
	crypto_unregister_template(&crypto_cfb_tmpl);
}

subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);