// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the kernel crypto API.  See Documentation/block/inline-encryption.rst.
 * fscrypt still provides the key and IV to use.
 */

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/keyslot-manager.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"
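
/*
 * A key prepared for use with blk-crypto, together with a reference to the
 * request_queue of each of the filesystem's devices, so that the key can
 * later be evicted from all of them.
 */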
struct fscrypt_blk_crypto_key {
	struct blk_crypto_key base;
	int num_devs;
	struct request_queue *devs[];
};
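
/* Return the number of block devices the filesystem uses. */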
static int fscrypt_get_num_devices(struct super_block *sb)
{
	if (sb->s_cop->get_num_devices)
		return sb->s_cop->get_num_devices(sb);
	return 1;
}
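
/* Fill @devs with the request_queue of each block device the fs uses. */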
static void fscrypt_get_devices(struct super_block *sb, int num_devs,
				struct request_queue **devs)
{
	if (num_devs == 1)
		devs[0] = bdev_get_queue(sb->s_bdev);
	else
		sb->s_cop->get_devices(sb, devs);
}
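
/*
 * Return the number of bytes of the IV that blk-crypto will use as the data
 * unit number (DUN), which depends on the file's IV generation strategy.
 */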
static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
	struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int ino_bits = 64, lblk_bits = 64;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file logical block number */
	if (sb->s_cop->get_ino_and_lblk_bits)
		sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
	return DIV_ROUND_UP(lblk_bits, 8);
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
				   bool is_hw_wrapped_key)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	int num_devs;
	struct request_queue **devs;
	int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem blocks,
	 * some filesystem code only calls fscrypt_mergeable_bio() for the first
	 * block in the page.  This is fine for most of fscrypt's IV generation
	 * strategies, where contiguous blocks imply contiguous IVs.  But it
	 * doesn't work with IV_INO_LBLK_32.  For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's devices, blk-crypto must support the crypto
	 * configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = sb->s_blocksize;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	crypto_cfg.is_hw_wrapped = is_hw_wrapped_key;
	num_devs = fscrypt_get_num_devices(sb);
	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	fscrypt_get_devices(sb, num_devs, devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}
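
/*
 * Prepare an inline-encryption key for use via blk-crypto: initialize it,
 * start using it on all the filesystem's devices, and publish it in
 * @prep_key.
 */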
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *raw_key,
				     unsigned int raw_key_size,
				     bool is_hw_wrapped,
				     const struct fscrypt_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	int num_devs = fscrypt_get_num_devices(sb);
	int queue_refs = 0;
	struct fscrypt_blk_crypto_key *blk_key;
	int err;
	int i;

	blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	blk_key->num_devs = num_devs;
	fscrypt_get_devices(sb, num_devs, blk_key->devs);

	BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
		     BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);

	err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
				  is_hw_wrapped, crypto_mode,
				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/*
	 * We have to start using blk-crypto on all the filesystem's devices.
	 * We also have to save all the request_queue's for later so that the
	 * key can be evicted from them.  This is needed because some keys
	 * aren't destroyed until after the filesystem is unmounted (namely,
	 * the per-mode keys in struct fscrypt_master_key).
	 */
	for (i = 0; i < num_devs; i++) {
		if (!blk_get_queue(blk_key->devs[i])) {
			fscrypt_err(inode, "couldn't get request_queue");
			err = -EAGAIN;
			goto fail;
		}
		queue_refs++;

		err = blk_crypto_start_using_key(&blk_key->base,
						 blk_key->devs[i]);
		if (err) {
			fscrypt_err(inode,
				    "error %d starting to use blk-crypto", err);
			goto fail;
		}
	}
	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it.  Note that this concurrency is only
	 * possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	for (i = 0; i < queue_refs; i++)
		blk_put_queue(blk_key->devs[i]);
	kfree_sensitive(blk_key);
	return err;
}
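
/*
 * Evict the key from every request_queue it was set up on, drop the queue
 * references taken in fscrypt_prepare_inline_crypt_key(), and free the key.
 */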
void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key)
{
	struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key;
	int i;

	if (blk_key) {
		for (i = 0; i < blk_key->num_devs; i++) {
			blk_crypto_evict_key(blk_key->devs[i], &blk_key->base);
			blk_put_queue(blk_key->devs[i]);
		}
		kfree_sensitive(blk_key);
	}
}
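
/*
 * Ask the inline encryption hardware, via the keyslot manager of the main
 * block device, to derive the software "raw secret" from a hardware-wrapped
 * key.
 */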
int fscrypt_derive_raw_secret(struct super_block *sb,
			      const u8 *wrapped_key,
			      unsigned int wrapped_key_size,
			      u8 *raw_secret, unsigned int raw_secret_size)
{
	struct request_queue *q;

	q = bdev_get_queue(sb->s_bdev);
	if (!q->ksm)
		return -EOPNOTSUPP;

	return blk_ksm_derive_raw_secret(q->ksm, wrapped_key, wrapped_key_size,
					 raw_secret, raw_secret_size);
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
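
/* Generate the data unit number (DUN) array for the given logical block. */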
static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, lblk_num, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 *
 * This function also handles setting bi_skip_dm_default_key when needed.
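 *
 * A hypothetical caller in a filesystem's read path might look roughly like
 * this (an illustrative sketch only, not taken from any particular
 * filesystem):
 *
 *	bio = bio_alloc(GFP_NOFS, nr_vecs);
 *	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
 *	(add pages, using fscrypt_mergeable_bio() to decide when to stop)
 *	submit_bio(bio);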
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (fscrypt_inode_should_skip_dm_default_key(inode))
		bio_set_skip_dm_default_key(bio);

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct page *page = bh->b_page;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page.  fscrypt doesn't care about these.
	 */
	mapping = page_mapping(page);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
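	/*
	 * The logical block number is the page index scaled to blocks
	 * (PAGE_SHIFT - i_blkbits is log2 of the blocks per page), plus the
	 * block offset of the buffer_head within the page.
	 */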
	*lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function also returns false if the next part of the I/O would need to
 * have a different value for the bi_skip_dm_default_key flag.
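 *
 * A hypothetical caller building a multi-block bio might do the following
 * (an illustrative sketch only):
 *
 *	if (bio && !fscrypt_mergeable_bio(bio, inode, next_lblk)) {
 *		submit_bio(bio);
 *		bio = NULL;
 *	}
 *	(then allocate a new bio and call fscrypt_set_bio_crypt_ctx() on it)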
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (bio_should_skip_dm_default_key(bio) !=
	    fscrypt_inode_should_skip_dm_default_key(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer.  I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context &&
		       !bio_should_skip_dm_default_key(bio);

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether a direct I/O request is unsupported
 *			     due to encryption constraints
 * @iocb: the file and position the I/O is targeting
 * @iter: the I/O data segment(s)
 *
 * Return: true if direct I/O is supported
 */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
	const struct inode *inode = file_inode(iocb->ki_filp);
	const unsigned int blocksize = i_blocksize(inode);

	/* If the file is unencrypted, no veto from us. */
	if (!fscrypt_needs_contents_encryption(inode))
		return true;

	/* We only support direct I/O with inline crypto, not fs-layer crypto */
	if (!fscrypt_inode_uses_inline_crypto(inode))
		return false;

	/*
	 * Since the granularity of encryption is filesystem blocks, the I/O
	 * must be block aligned -- not just disk sector aligned.
	 */
	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in the bio
 * targeting @lblk without causing a data unit number (DUN) discontinuity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks.  (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur.  If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of the bio.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
	const struct fscrypt_info *ci = inode->i_crypt_info;
	u32 dun;

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return nr_blocks;

	if (nr_blocks <= 1)
		return nr_blocks;

	if (!(fscrypt_policy_flags(&ci->ci_policy) &
	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
		return nr_blocks;

	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
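	/*
	 * For example (hypothetical values): if the 32-bit DUN for @lblk
	 * works out to 0xfffffffe, only DUNs 0xfffffffe and 0xffffffff remain
	 * before the wraparound, so at most 2 blocks are allowed here:
	 * (u64)U32_MAX + 1 - 0xfffffffe == 2.
	 */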
	dun = ci->ci_hashed_ino + lblk;

	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);