Add a device-mapper target "dm-default-key" which assigns an encryption
key to bios that aren't for the contents of an encrypted file.

This ensures that all blocks on-disk will be encrypted with some key,
without the performance hit of file contents being encrypted twice when
fscrypt (File-Based Encryption) is used.

It is only appropriate to use dm-default-key when key configuration is
tightly controlled, as it is in Android, such that all fscrypt keys are
at least as hard to compromise as the default key.

Compared to the original version of dm-default-key, this has been
modified to use the new vendor-independent inline encryption framework
(which works even when no inline encryption hardware is present), the
table syntax has been changed to match dm-crypt, and support for
specifying Adiantum encryption has been added. These changes also mean
that dm-default-key now always explicitly specifies the DUN (the IV).

Also, to handle f2fs moving blocks of encrypted files around without the
key, and to handle ext4 and f2fs filesystems mounted without
'-o inlinecrypt', the mapping logic is no longer "set a key on the bio
if it doesn't have one already", but rather "set a key on the bio unless
the bio has the bi_skip_dm_default_key flag set". Filesystems set this
flag on *all* bios for encrypted file contents, regardless of whether
they are encrypting/decrypting the file using inline encryption or the
traditional filesystem-layer encryption, or moving the raw data.

For the bi_skip_dm_default_key flag, a new field in struct bio is used
rather than a bit in bi_opf so that fscrypt_set_bio_crypt_ctx() can set
the flag, minimizing the changes needed to filesystems. (bi_opf is
usually overwritten after fscrypt_set_bio_crypt_ctx() is called.)

Bug: 137270441
Bug: 147814592
Change-Id: I69c9cd1e968ccf990e4ad96e5115b662237f5095
Signed-off-by: Eric Biggers <ebiggers@google.com>
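
The mapping rule described above reduces to one check per bio. Below is
a minimal sketch of that rule, assuming a device-mapper map callback and
the bi_skip_dm_default_key field this change adds; default_key_set_crypt_ctx()
is a hypothetical stand-in for whatever helper actually programs the
default key and DUN, not a function defined by this change:

/* Hypothetical sketch of the mapping rule; not part of this change. */
static int default_key_map(struct dm_target *ti, struct bio *bio)
{
	/*
	 * Filesystems set bi_skip_dm_default_key on every bio for
	 * encrypted file contents; everything else gets the default key.
	 */
	if (bio_has_data(bio) && !bio->bi_skip_dm_default_key)
		default_key_set_crypt_ctx(ti, bio);	/* hypothetical helper */

	return DM_MAPIO_REMAPPED;
}
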
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/keyslot-manager.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		return -ENOMEM;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		return -ENOMEM;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
}

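/*
 * Allocate a bio_crypt_ctx from the mempool. With a @gfp_mask that
 * allows direct reclaim, mempool_alloc() is guaranteed to succeed.
 */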
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
	return mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
}
EXPORT_SYMBOL_GPL(bio_crypt_alloc_ctx);

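/*
 * Return the bio's crypt context, if any, to the mempool.
 * mempool_free() is a no-op for a NULL context.
 */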
void bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

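/*
 * Copy @src's crypt context to @dst, if it has one. The copy takes its
 * own reference on the source's keyslot, if one has been assigned.
 * Fallback-crypted bios intentionally get no context; see below.
 */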
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

	bio_clone_skip_dm_default_key(dst, src);

	/*
	 * If a bio is fallback_crypted, then it will be decrypted when
	 * bio_endio is called. As we only want the data to be decrypted
	 * once, copies of the bio must not have a crypt context.
	 */
	if (!src_bc || bio_crypt_fallback_crypted(src_bc))
		return;

	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	*dst->bi_crypt_context = *src_bc;

	if (src_bc->bc_keyslot >= 0)
		keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL_GPL(bio_crypt_clone);

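/*
 * Return true if @rq carries a crypt context that was set up for the
 * keyslot manager of the queue it was issued to, i.e. the request's
 * encryption should be handled at this layer.
 */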
bool bio_crypt_should_process(struct request *rq)
{
	struct bio *bio = rq->bio;

	if (!bio || !bio->bi_crypt_context)
		return false;

	return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
}
EXPORT_SYMBOL_GPL(bio_crypt_should_process);

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	if (!bc1)
		return !bc2;
	return bc2 && bc1->bc_key == bc2->bc_key;
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order b_1 followed by b_2.
 */
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
			     struct bio *b_2)
{
	struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
	struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;

	if (!bio_crypt_ctx_compatible(b_1, b_2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
}

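/*
 * Drop the context's reference on its keyslot and clear the
 * association. Pairs with bio_crypt_ctx_acquire_keyslot().
 */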
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
{
	keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
	bc->bc_ksm = NULL;
	bc->bc_keyslot = -1;
}

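/*
 * Get a keyslot in @ksm for the context's key and record the
 * association in @bc. Returns 0 on success or a negative error
 * from the keyslot manager.
 */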
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
				  struct keyslot_manager *ksm)
{
	int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);

	if (slot < 0)
		return slot;

	bc->bc_keyslot = slot;
	bc->bc_ksm = ksm;
	return 0;
}