blk-crypto-fallback.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");
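
/*
 * Tuning note (added commentary, not from the original source): the perm
 * argument is 0, so these parameters are not exposed in sysfs and can only be
 * set at boot.  Assuming this file is built in, the kernel command line would
 * look something like:
 *
 *	blk_crypto_fallback.num_keyslots=200
 *	blk_crypto_fallback.num_prealloc_bounce_pg=64
 *
 * The "blk_crypto_fallback." prefix is an assumption derived from the object
 * file name; verify it against KBUILD_MODNAME before relying on it.
 */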

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}

/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_keyslot_program,
	.keyslot_evict		= blk_crypto_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);
	if (!bio)
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	bio_clone_skip_dm_default_key(bio, bio_src);

	return bio;
}

static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
					struct skcipher_request **ciph_req_ret,
					struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}
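
/*
 * Note (added commentary): the request is bound to the tfm that
 * blk_crypto_keyslot_program() already keyed for this slot.  Pairing
 * crypto_req_done and the caller's struct crypto_wait with crypto_wait_req()
 * makes the (possibly asynchronous) skcipher operations behave synchronously,
 * which is what the per-data-unit loops below rely on.
 */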

static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_PAGES)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}
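
/*
 * Worked example (added commentary, assuming BIO_MAX_PAGES == 256 and 4 KiB
 * pages): the loop above counts at most 256 single-page segments, i.e. at most
 * 256 * 8 = 2048 sectors (1 MiB).  A 4 MiB write bio would therefore be split
 * so that only its first ~1 MiB is encrypted into a bounce bio now; the
 * chained remainder is resubmitted and goes through the same path again.
 */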

union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
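
/*
 * Worked example (added commentary): the IV is just the DUN laid out
 * little-endian per 64-bit word.  A DUN whose low word is 0x0102 and whose
 * remaining words are zero produces an IV beginning
 *
 *	02 01 00 00 00 00 00 00
 *
 * with all remaining IV bytes zero.
 */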

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
 * large. Returns true on success. Returns false and sets bio->bi_status on
 * error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for single page bvec */
	if (!blk_crypto_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;

	ret = true;
	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}
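
/*
 * Note (added commentary): the write path is therefore split-if-needed ->
 * clone -> acquire keyslot -> per-page, per-data-unit encryption into bounce
 * pages -> swap *bio_ptr for the bounce bio.  The caller submits the bounce
 * bio; when it completes, blk_crypto_fallback_encrypt_endio() frees the bounce
 * pages and ends the original bio with the bounce bio's status.  The keyslot
 * and skcipher_request are released before returning in both the success and
 * error paths.
 */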

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}
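
/*
 * Note (added commentary): decryption iterates over f_ctx->crypt_iter, the
 * bvec_iter saved at submission time, rather than bio->bi_iter, which may have
 * been advanced or split by lower layers by the time the bio completes.  The
 * data is decrypted in place in the caller's pages, so no bounce pages are
 * needed on the read path.
 */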

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If the bio is doing a WRITE operation, this splits the bio into two parts if
 * it's too big (see blk_crypto_split_bio_if_needed). It then allocates a
 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up an f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}
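
/*
 * Usage sketch (added commentary; the call names below are from the blk-crypto
 * API as the author understands it and should be checked against
 * include/linux/blk-crypto.h, since exact signatures vary between kernel
 * trees):
 *
 *	blk_crypto_init_key(...);		// describe key, mode, DUN size
 *	blk_crypto_start_using_key(...);	// may trigger this fallback's init
 *	bio_crypt_set_ctx(...);			// attach key + DUN to the bio
 *	submit_bio(bio);
 *
 * If the target queue's inline encryption hardware cannot handle the key,
 * blk-crypto routes the bio through blk_crypto_fallback_bio_prep() above.
 */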

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}

static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto out;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
out:
	return err;
}
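
/*
 * Note (added commentary): blk_crypto_fallback_init() is deliberately lazy; it
 * runs only the first time blk_crypto_fallback_start_using_mode() is called,
 * under tfms_init_lock, so systems that never use the fallback do not pay for
 * the keyslot array, workqueue, or mempools.  The failure labels unwind the
 * allocations in reverse order of creation.
 */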

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}
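
/*
 * Sizing note (added commentary): each call to
 * blk_crypto_fallback_start_using_mode() allocates one skcipher tfm per
 * keyslot for that mode, so with the default num_keyslots=100 the first use of
 * a mode costs 100 tfm allocations, and enabling a second mode costs another
 * 100.  Raising num_keyslots trades memory for fewer keyslot evictions when
 * many distinct keys are in flight.
 */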