diff --git a/block/bio-crypt-ctx.c b/block/bio-crypt-ctx.c
index b6df3dcf28cc..75008b2afea2 100644
--- a/block/bio-crypt-ctx.c
+++ b/block/bio-crypt-ctx.c
@@ -61,6 +61,8 @@ void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
 {
 	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;
 
+	bio_clone_skip_dm_default_key(dst, src);
+
 	/*
 	 * If a bio is fallback_crypted, then it will be decrypted when
 	 * bio_endio is called. As we only want the data to be decrypted once,
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index ef293547c56b..cce3317cba80 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -192,6 +192,8 @@ static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
 
 	bio_clone_blkcg_association(bio, bio_src);
 
+	bio_clone_skip_dm_default_key(bio, bio_src);
+
 	return bio;
 }
 
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 499e424f5e9a..bb4406cd78b0 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -294,6 +294,24 @@ config DM_CRYPT
 
 	  If unsure, say N.
 
+config DM_DEFAULT_KEY
+	tristate "Default-key target support"
+	depends on BLK_DEV_DM
+	depends on BLK_INLINE_ENCRYPTION
+	help
+	  This device-mapper target allows you to create a device that
+	  assigns a default encryption key to bios that aren't for the
+	  contents of an encrypted file.
+
+	  This ensures that all blocks on-disk will be encrypted with
+	  some key, without the performance hit of file contents being
+	  encrypted twice when fscrypt (File-Based Encryption) is used.
+
+	  It is only appropriate to use dm-default-key when key
+	  configuration is tightly controlled, like it is in Android,
+	  such that all fscrypt keys are at least as hard to compromise
+	  as the default key.
+
 config DM_SNAPSHOT
 	tristate "Snapshot target"
 	depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 8bb6800f1880..715f2fc6b4fb 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_DM_UNSTRIPED)	+= dm-unstripe.o
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
 obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
+obj-$(CONFIG_DM_DEFAULT_KEY)	+= dm-default-key.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_FLAKEY)		+= dm-flakey.o
 obj-$(CONFIG_DM_MULTIPATH)	+= dm-multipath.o dm-round-robin.o
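[Illustrative usage sketch, not part of the patch; the device path, sizes, and key are made-up placeholders.] The new target is configured with a device-mapper table line following the "<cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]" syntax documented in drivers/md/dm-default-key.c below, for example:

    # Example only: map 41943040 sectors (20 GiB) of /dev/sda1 with one
    # AES-256-XTS default key, given as 128 hex characters.
    dmsetup create defaultkey-dev --table "0 41943040 default-key aes-xts-plain64 <128-hex-character-key> 0 /dev/sda1 0 3 allow_discards sector_size:4096 iv_large_sectors"

The trailing "3" is the count of optional feature arguments that follow it; with sector_size:4096, the mapped length (41943040 sectors) must be a multiple of 8 so that the device ends on a crypto-sector boundary.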
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
new file mode 100644
index 000000000000..1eb9ddfad04c
--- /dev/null
+++ b/drivers/md/dm-default-key.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX		"default-key"
+
+#define DM_DEFAULT_KEY_MAX_KEY_SIZE	64
+
+static const struct dm_default_key_cipher {
+	const char *name;
+	enum blk_crypto_mode_num mode_num;
+	int key_size;
+} dm_default_key_ciphers[] = {
+	{
+		.name = "aes-xts-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS,
+		.key_size = 64,
+	}, {
+		.name = "xchacha12,aes-adiantum-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_ADIANTUM,
+		.key_size = 32,
+	},
+};
+
+/**
+ * struct default_key_c - private data of a default-key target
+ * @dev: the underlying device
+ * @start: starting sector of the range of @dev which this target actually
+ *	   maps.  For this purpose a "sector" is 512 bytes.
+ * @cipher_string: the name of the encryption algorithm being used
+ * @iv_offset: starting offset for IVs.  IVs are generated as if the target
+ *	       were preceded by @iv_offset 512-byte sectors.
+ * @sector_size: crypto sector size in bytes (usually 4096)
+ * @sector_bits: log2(sector_size)
+ * @key: the encryption key to use
+ */
+struct default_key_c {
+	struct dm_dev *dev;
+	sector_t start;
+	const char *cipher_string;
+	u64 iv_offset;
+	unsigned int sector_size;
+	unsigned int sector_bits;
+	struct blk_crypto_key key;
+};
+
+static const struct dm_default_key_cipher *
+lookup_cipher(const char *cipher_string)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) {
+		if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0)
+			return &dm_default_key_ciphers[i];
+	}
+	return NULL;
+}
+
+static void default_key_dtr(struct dm_target *ti)
+{
+	struct default_key_c *dkc = ti->private;
+	int err;
+
+	if (dkc->dev) {
+		err = blk_crypto_evict_key(dkc->dev->bdev->bd_queue, &dkc->key);
+		if (err && err != -ENOKEY)
+			DMWARN("Failed to evict crypto key: %d", err);
+		dm_put_device(ti, dkc->dev);
+	}
+	kzfree(dkc->cipher_string);
+	kzfree(dkc);
+}
+
+static int default_key_ctr_optional(struct dm_target *ti,
+				    unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc = ti->private;
+	struct dm_arg_set as;
+	static const struct dm_arg _args[] = {
+		{0, 3, "Invalid number of feature args"},
+	};
+	unsigned int opt_params;
+	const char *opt_string;
+	bool iv_large_sectors = false;
+	char dummy;
+	int err;
+
+	as.argc = argc;
+	as.argv = argv;
+
+	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+	if (err)
+		return err;
+
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			return -EINVAL;
+		}
+		if (!strcmp(opt_string, "allow_discards")) {
+			ti->num_discard_bios = 1;
+		} else if (sscanf(opt_string, "sector_size:%u%c",
+				  &dkc->sector_size, &dummy) == 1) {
+			if (dkc->sector_size < SECTOR_SIZE ||
+			    dkc->sector_size > 4096 ||
+			    !is_power_of_2(dkc->sector_size)) {
+				ti->error = "Invalid sector_size";
+				return -EINVAL;
+			}
+		} else if (!strcmp(opt_string, "iv_large_sectors")) {
+			iv_large_sectors = true;
+		} else {
+			ti->error = "Invalid feature arguments";
+			return -EINVAL;
+		}
+	}
+
+	/* dm-default-key doesn't implement iv_large_sectors=false. */
+	if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {
+		ti->error = "iv_large_sectors must be specified";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Construct a default-key mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start> [<#opt_params> <opt_params>]
+ *
+ * This syntax matches dm-crypt's, but lots of unneeded functionality has been
+ * removed.  Also, dm-default-key requires that the "iv_large_sectors" option
+ * be given whenever a non-default sector size is used.
+ */
+static int default_key_ctr(struct dm_target *ti, unsigned int argc,
+			   char **argv)
+{
+	struct default_key_c *dkc;
+	const struct dm_default_key_cipher *cipher;
+	u8 raw_key[DM_DEFAULT_KEY_MAX_KEY_SIZE];
+	unsigned long long tmpll;
+	char dummy;
+	int err;
+
+	if (argc < 5) {
+		ti->error = "Not enough arguments";
+		return -EINVAL;
+	}
+
+	dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
+	if (!dkc) {
+		ti->error = "Out of memory";
+		return -ENOMEM;
+	}
+	ti->private = dkc;
+
+	/* <cipher> */
+	dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL);
+	if (!dkc->cipher_string) {
+		ti->error = "Out of memory";
+		err = -ENOMEM;
+		goto bad;
+	}
+	cipher = lookup_cipher(dkc->cipher_string);
+	if (!cipher) {
+		ti->error = "Unsupported cipher";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <key> */
+	if (strlen(argv[1]) != 2 * cipher->key_size) {
+		ti->error = "Incorrect key size for cipher";
+		err = -EINVAL;
+		goto bad;
+	}
+	if (hex2bin(raw_key, argv[1], cipher->key_size) != 0) {
+		ti->error = "Malformed key string";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <iv_offset> */
+	if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) {
+		ti->error = "Invalid iv_offset sector";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <dev_path> */
+	err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &dkc->dev);
+	if (err) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	/* <start> */
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
+	    tmpll != (sector_t)tmpll) {
+		ti->error = "Invalid start sector";
+		err = -EINVAL;
+		goto bad;
+	}
+	dkc->start = tmpll;
+
+	/* optional arguments */
+	dkc->sector_size = SECTOR_SIZE;
+	if (argc > 5) {
+		err = default_key_ctr_optional(ti, argc - 5, &argv[5]);
+		if (err)
+			goto bad;
+	}
+	dkc->sector_bits = ilog2(dkc->sector_size);
+	if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
+		ti->error = "Device size is not a multiple of sector_size";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	err = blk_crypto_init_key(&dkc->key, raw_key, cipher->key_size,
+				  cipher->mode_num, dkc->sector_size);
+	if (err) {
+		ti->error = "Error initializing blk-crypto key";
+		goto bad;
+	}
+
+	err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size,
+					  dkc->dev->bdev->bd_queue);
+	if (err) {
+		ti->error = "Error starting to use blk-crypto";
+		goto bad;
+	}
+
+	ti->num_flush_bios = 1;
+
+	ti->may_passthrough_inline_crypto = true;
+
+	err = 0;
+	goto out;
+
+bad:
+	default_key_dtr(ti);
+out:
+	memzero_explicit(raw_key, sizeof(raw_key));
+	return err;
+}
+
+static int default_key_map(struct dm_target *ti, struct bio *bio)
+{
+	const struct default_key_c *dkc = ti->private;
+	sector_t sector_in_target;
+	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
+
+	bio_set_dev(bio, dkc->dev->bdev);
+
+	/*
+	 * If the bio is a device-level request which doesn't target a specific
+	 * sector, there's nothing more to do.
+	 */
+	if (bio_sectors(bio) == 0)
+		return DM_MAPIO_REMAPPED;
+
+	/* Map the bio's sector to the underlying device. (512-byte sectors) */
+	sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector);
+	bio->bi_iter.bi_sector = dkc->start + sector_in_target;
+
+	/*
+	 * If the bio should skip dm-default-key (i.e. if it's for an encrypted
+	 * file's contents), or if it doesn't have any data (e.g. if it's a
+	 * DISCARD request), there's nothing more to do.
+	 */
+	if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio))
+		return DM_MAPIO_REMAPPED;
+
+	/*
+	 * Else, dm-default-key needs to set this bio's encryption context.
+	 * It must not already have one.
+	 */
+	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+		return DM_MAPIO_KILL;
+
+	/* Calculate the DUN and enforce data-unit (crypto sector) alignment. */
+	dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */
+	if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
+		return DM_MAPIO_KILL;
+	dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */
+
+	bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO);
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static void default_key_status(struct dm_target *ti, status_type_t type,
+			       unsigned int status_flags, char *result,
+			       unsigned int maxlen)
+{
+	const struct default_key_c *dkc = ti->private;
+	unsigned int sz = 0;
+	int num_feature_args = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		/* Omit the key for now. */
+		DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset,
+		       dkc->dev->name, (unsigned long long)dkc->start);
+
+		num_feature_args += !!ti->num_discard_bios;
+		if (dkc->sector_size != SECTOR_SIZE)
+			num_feature_args += 2;
+		if (num_feature_args != 0) {
+			DMEMIT(" %d", num_feature_args);
+			if (ti->num_discard_bios)
+				DMEMIT(" allow_discards");
+			if (dkc->sector_size != SECTOR_SIZE) {
+				DMEMIT(" sector_size:%u", dkc->sector_size);
+				DMEMIT(" iv_large_sectors");
+			}
+		}
+		break;
+	}
+}
+
+static int default_key_prepare_ioctl(struct dm_target *ti,
+				     struct block_device **bdev)
+{
+	const struct default_key_c *dkc = ti->private;
+	const struct dm_dev *dev = dkc->dev;
+
+	*bdev = dev->bdev;
+
+	/* Only pass ioctls through if the device sizes match exactly. */
+	if (dkc->start != 0 ||
+	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
+}
+
+static int default_key_iterate_devices(struct dm_target *ti,
+				       iterate_devices_callout_fn fn,
+				       void *data)
+{
+	const struct default_key_c *dkc = ti->private;
+
+	return fn(ti, dkc->dev, dkc->start, ti->len, data);
+}
+
+static void default_key_io_hints(struct dm_target *ti,
+				 struct queue_limits *limits)
+{
+	const struct default_key_c *dkc = ti->private;
+	const unsigned int sector_size = dkc->sector_size;
+
+	limits->logical_block_size =
+		max_t(unsigned short, limits->logical_block_size, sector_size);
+	limits->physical_block_size =
+		max_t(unsigned int, limits->physical_block_size, sector_size);
+	limits->io_min = max_t(unsigned int, limits->io_min, sector_size);
+}
+
+static struct target_type default_key_target = {
+	.name = "default-key",
+	.version = {2, 0, 0},
+	.module = THIS_MODULE,
+	.ctr = default_key_ctr,
+	.dtr = default_key_dtr,
+	.map = default_key_map,
+	.status = default_key_status,
+	.prepare_ioctl = default_key_prepare_ioctl,
+	.iterate_devices = default_key_iterate_devices,
+	.io_hints = default_key_io_hints,
+};
+
+static int __init dm_default_key_init(void)
+{
+	return dm_register_target(&default_key_target);
+}
+
+static void __exit dm_default_key_exit(void)
+{
+	dm_unregister_target(&default_key_target);
+}
+
+module_init(dm_default_key_init);
+module_exit(dm_default_key_exit);
+
+MODULE_AUTHOR("Paul Lawrence");
+MODULE_AUTHOR("Paul Crowley");
+MODULE_AUTHOR("Eric Biggers");
+MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
+MODULE_LICENSE("GPL");
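[Illustrative note, not part of the patch.] To make the mapping arithmetic in default_key_map() above concrete, the helper below (the name is hypothetical) restates the DUN computation. With sector_size = 4096 (sector_bits = 12) and iv_offset = 0, a bio starting at 512-byte sector 8 of the target gets DUN (0 + 8) >> (12 - 9) = 1, while a bio starting at sector 9 is rejected because it does not begin on a 4096-byte data-unit boundary (9 & 7 != 0):

/* Illustrative only; mirrors the DUN computation in default_key_map() above. */
static int example_compute_dun(const struct default_key_c *dkc,
			       sector_t sector_in_target, u64 *dun)
{
	/* Work in 512-byte sectors first, including the IV offset. */
	*dun = dkc->iv_offset + sector_in_target;

	/* The bio must start on a crypto-sector (data unit) boundary. */
	if (*dun & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
		return -EIO;

	/* Convert from 512-byte sectors to crypto sectors. */
	*dun >>= dkc->sector_bits - SECTOR_SHIFT;
	return 0;
}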
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 407fe4049b00..9c830772b7f5 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -214,6 +214,8 @@ static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
  * otherwise fscrypt_mergeable_bio() won't work as intended.
  *
  * The encryption context will be freed automatically when the bio is freed.
+ *
+ * This function also handles setting bi_skip_dm_default_key when needed.
  */
 void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 			       u64 first_lblk, gfp_t gfp_mask)
@@ -221,6 +223,9 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 	const struct fscrypt_info *ci = inode->i_crypt_info;
 	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 
+	if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
+
 	if (!fscrypt_inode_uses_inline_crypto(inode))
 		return;
 
@@ -290,6 +295,9 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
  *
  * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
  *
+ * This function also returns false if the next part of the I/O would need to
+ * have a different value for the bi_skip_dm_default_key flag.
+ *
  * Return: true iff the I/O is mergeable
  */
 bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -300,6 +308,9 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
 	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
 		return false;
 
+	if (bio_should_skip_dm_default_key(bio) !=
+	    fscrypt_inode_should_skip_dm_default_key(inode))
+		return false;
 	if (!bc)
 		return true;
 
@@ -333,7 +344,8 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
 	u64 next_lblk;
 
 	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
-		return !bio->bi_crypt_context;
+		return !bio->bi_crypt_context &&
+		       !bio_should_skip_dm_default_key(bio);
 
 	return fscrypt_mergeable_bio(bio, inode, next_lblk);
 }
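[Illustrative note, not part of the patch; the helper is hypothetical, only fscrypt_set_bio_crypt_ctx() and fscrypt_mergeable_bio() are real.] This sketch shows the calling pattern the two hooks above are designed for, which the f2fs change below follows: a freshly allocated bio inherits its crypto state from the inode, and an existing bio may only be extended when the next block is crypto-compatible.

/* Illustrative only; assumes <linux/bio.h> and <linux/fscrypt.h>. */
static bool example_block_fits_bio(struct bio *bio, const struct inode *inode,
				   u64 lblk, gfp_t gfp_mask)
{
	if (!bio_has_data(bio)) {
		/* Freshly allocated bio with no pages yet: attach the inode's
		 * encryption context, which with this patch also sets
		 * bi_skip_dm_default_key when appropriate. */
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, gfp_mask);
		return true;
	}

	/* Existing bio: only extend it if the next block can be encrypted as
	 * part of the same request (same key, contiguous DUN) and, with this
	 * patch, needs the same bi_skip_dm_default_key value. */
	return fscrypt_mergeable_bio(bio, inode, lblk);
}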
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e68f9858750d..bbf89634a63e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -328,6 +328,8 @@ static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 	 */
 	if (!fio || !fio->encrypted_page)
 		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+	else if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
 }
 
 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -339,7 +341,9 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
 	 * read/write raw data without encryption.
 	 */
 	if (fio && fio->encrypted_page)
-		return !bio_has_crypt_ctx(bio);
+		return !bio_has_crypt_ctx(bio) &&
+			(bio_should_skip_dm_default_key(bio) ==
+			 fscrypt_inode_should_skip_dm_default_key(inode));
 
 	return fscrypt_mergeable_bio(bio, inode, next_idx);
 }
diff --git a/include/linux/bio-crypt-ctx.h b/include/linux/bio-crypt-ctx.h
index 652f92ff75fd..ab22dbe7b880 100644
--- a/include/linux/bio-crypt-ctx.h
+++ b/include/linux/bio-crypt-ctx.h
@@ -189,6 +189,38 @@ static inline bool bio_crypt_ctx_mergeable(struct bio *b_1,
 
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+	bio->bi_skip_dm_default_key = true;
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return bio->bi_skip_dm_default_key;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+	dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
+}
+#else /* CONFIG_DM_DEFAULT_KEY */
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return false;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+}
+#endif /* !CONFIG_DM_DEFAULT_KEY */
+
 #endif /* CONFIG_BLOCK */
 
 #endif /* __LINUX_BIO_CRYPT_CTX_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 73568b238a5c..51165b1c1ad9 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -186,6 +186,9 @@ struct bio {
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct bio_crypt_ctx	*bi_crypt_context;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+	bool			bi_skip_dm_default_key;
+#endif
 #endif
 
 	union {
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 841dcbec26b5..6ac092cc4067 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -590,6 +590,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
 }
 #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+#else
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return false;
+}
+#endif
+
 /**
  * fscrypt_require_key - require an inode's encryption key
  * @inode: the inode we need the key for
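[Illustrative note, not part of the patch; the helper is hypothetical, the predicates are the ones added above.] Taken together, the header changes give filesystems and stacked drivers a single flag to coordinate on; the resulting policy for one bio reaching the default-key target can be summarized as:

/* Illustrative only: would this bio be encrypted with the default key? */
static bool example_bio_gets_default_key(const struct bio *bio)
{
	/*
	 * fscrypt sets bi_skip_dm_default_key for bios carrying the contents
	 * of encrypted regular files (they are already encrypted by fscrypt,
	 * either inline or via the blk-crypto fallback or fs-layer bounce
	 * pages), and bio_clone_skip_dm_default_key() preserves the flag
	 * across clones.  Everything else (filesystem metadata, unencrypted
	 * file contents, directory blocks) gets the default key, provided the
	 * bio actually carries data.
	 */
	return !bio_should_skip_dm_default_key(bio) && bio_has_data(bio);
}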