blk-crypto.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}
EXPORT_SYMBOL_GPL(__bio_crypt_clone);

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
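
/*
 * Worked example (illustrative only, not part of the original file): with
 * 64-bit limbs and BLK_CRYPTO_DUN_ARRAY_SIZE >= 2, incrementing a DUN whose
 * low limb is at its maximum value carries into the next limb:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { U64_MAX, 0 };
 *
 *	bio_crypt_dun_increment(dun, 1);
 *	// now dun[0] == 0 and dun[1] == 1: the overflow of limb 0
 *	// carried 1 into limb 1, exactly like multi-word addition.
 */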

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
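
/*
 * Example (illustrative only): with a 4096-byte data unit size, a bio whose
 * context starts at DUN {5, 0} and covers 3 data units (bytes == 3 * 4096)
 * is contiguous with a following bio whose DUN is {8, 0}, since 5 + 3 == 8
 * with no carry out of the last limb.  If the addition had wrapped through 0
 * in the final limb, the function would return false.
 */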

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
					&rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_ksm_put_slot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into two - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we
	 * succeeded in falling back to the crypto API.
	 */
	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
					 &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key.
 * @raw_key_size: Size of raw key.  Must be at least the required size for the
 *		  chosen @crypto_mode; see blk_crypto_modes[].  (It's allowed
 *		  to be longer than the mode's actual key size, in order to
 *		  support inline encryption hardware that accepts wrapped keys.
 *		  @is_hw_wrapped has to be set for such keys.)
 * @is_hw_wrapped: Denotes @raw_key is wrapped.
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *raw_key, unsigned int raw_key_size,
			bool is_hw_wrapped,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);

	mode = &blk_crypto_modes[crypto_mode];
	if (is_hw_wrapped) {
		if (raw_key_size < mode->keysize ||
		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
			return -EINVAL;
	} else {
		if (raw_key_size != mode->keysize)
			return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = raw_key_size;
	memcpy(blk_key->raw, raw_key, raw_key_size);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
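
/*
 * Example usage (a hypothetical caller, shown for illustration; the values
 * and the raw key buffer are not part of this file): initialize a standard
 * (non-wrapped) AES-256-XTS key that encrypts 4096-byte data units and is
 * addressed with 8-byte DUNs:
 *
 *	struct blk_crypto_key blk_key;
 *	u8 raw_key[64];		// 64 bytes, per blk_crypto_modes[] above;
 *				// filled in by the caller
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key, sizeof(raw_key),
 *				  false, BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *	if (err)
 *		return err;
 */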

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    !cfg->is_hw_wrapped)
		return true;
	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go.  This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.is_hw_wrapped) {
		pr_warn_once("hardware doesn't support wrapped keys\n");
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key is not present in the q's ksm; -errno on
 *	   error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
		return blk_ksm_evict_key(q->ksm, key);

	/*
	 * If the request queue's associated inline encryption hardware didn't
	 * have support for the key, then the key might have been programmed
	 * into the fallback keyslot manager, so try to evict from there.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
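
/*
 * Putting the pieces together (a hedged sketch of a hypothetical upper layer,
 * not code from this file): after blk_crypto_init_key(), a filesystem would
 * typically prepare the key for the target queue, attach a crypt context to
 * each bio it submits, and evict the key once no I/O is using it.  Here
 * blk_key, q, bio, and first_data_unit_index are assumed to come from the
 * caller's context:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_data_unit_index };
 *	int err;
 *
 *	err = blk_crypto_start_using_key(&blk_key, q);
 *	if (err)
 *		return err;
 *
 *	// GFP_NOIO includes __GFP_DIRECT_RECLAIM, as bio_crypt_set_ctx()
 *	// requires.
 *	bio_crypt_set_ctx(bio, &blk_key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *
 *	// ... later, once the key is no longer in use by any in-flight IO ...
 *	err = blk_crypto_evict_key(q, &blk_key);
 */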