scompress.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

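/*
 * Per-CPU bounce buffers: acomp requests carry their data in scatterlists,
 * while scomp algorithms operate on linear buffers, so each request is
 * staged through these scratch areas, serialised by the per-CPU lock.
 */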
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

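/*
 * Free the per-CPU scratch buffers; safe to call on a partially allocated
 * set, since vfree(NULL) is a no-op.
 */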
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

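/*
 * Allocate one source and one destination scratch buffer of
 * SCOMP_SCRATCH_SIZE bytes per possible CPU, NUMA-local to that CPU;
 * on failure, unwind whatever was already allocated.
 */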
static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

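/*
 * The scratch buffers are shared by all scomp transforms and allocated
 * lazily: the first transform to be initialised allocates them, under
 * scomp_lock, and scomp_scratch_users counts the users.
 */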
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

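/*
 * Service an acomp request with a synchronous algorithm (dir != 0 selects
 * compression): copy the source scatterlist into this CPU's linear src
 * scratch buffer, run the scomp callback, then copy the result out to the
 * destination scatterlist, allocating one sized to the output if the
 * caller did not provide it. The scratch spinlock serialises users of the
 * per-CPU buffers.
 */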
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

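/*
 * Tear down the scomp backend of an acomp transform and release the
 * scratch buffers once the last user is gone.
 */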
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

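/*
 * Expose a synchronous algorithm through the asynchronous (acomp)
 * interface: instantiate the underlying scomp transform and point the
 * acomp entry points at the scatterlist-bouncing wrappers above.
 */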
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

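/*
 * Allocate the algorithm's per-request context for an acomp request backed
 * by an scomp transform; the request itself is freed on failure.
 */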
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

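/*
 * Register an scomp algorithm with the crypto API, forcing the transform
 * type and flags so the algorithm is looked up as
 * CRYPTO_ALG_TYPE_SCOMPRESS.
 */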
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

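/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * providing a synchronous compressor would typically fill in a
 * struct scomp_alg and register it. The my_* callbacks and names below
 * are hypothetical.
 *
 *	static struct scomp_alg my_scomp = {
 *		.alloc_ctx	= my_alloc_ctx,
 *		.free_ctx	= my_free_ctx,
 *		.compress	= my_compress,
 *		.decompress	= my_decompress,
 *		.base		= {
 *			.cra_name	 = "mycomp",
 *			.cra_driver_name = "mycomp-generic",
 *			.cra_module	 = THIS_MODULE,
 *		},
 *	};
 *
 *	ret = crypto_register_scomp(&my_scomp);
 */
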
void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

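/*
 * Register an array of scomp algorithms; if any registration fails, the
 * ones already registered are rolled back.
 */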
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");