b7b3af9614
Changes v5 => v6: - Blk-crypto's kernel crypto API fallback is no longer restricted to 8-byte DUNs. It's also now separately configurable from blk-crypto, and can be disabled entirely, while still allowing the kernel to use inline encryption hardware. Further, struct bio_crypt_ctx takes up less space, and no longer contains the information needed by the crypto API fallback - the fallback allocates the required memory when necessary. - Blk-crypto now supports all file content encryption modes supported by fscrypt. - Fixed bio merging logic in blk-merge.c - Fscrypt now supports inline encryption with the direct key policy, since blk-crypto now has support for larger DUNs. - Keyslot manager now uses a hashtable to lookup which keyslot contains any particular key (thanks Eric!) - Fscrypt support for inline encryption now handles filesystems with multiple underlying block devices (thanks Eric!) - Numerous cleanups Bug: 137270441 Test: refer to I26376479ee38259b8c35732cb3a1d7e15f9b05a3 Change-Id: I13e2e327e0b4784b394cb1e7cf32a04856d95f01 Link: https://lore.kernel.org/linux-block/20191218145136.172774-1-satyat@google.com/ Signed-off-by: Satya Tangirala <satyat@google.com>
141 lines
3.5 KiB
C
141 lines
3.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Copyright 2019 Google LLC
|
|
*/
|
|
|
|
#include <linux/bio.h>
|
|
#include <linux/blkdev.h>
|
|
#include <linux/keyslot-manager.h>
|
|
#include <linux/module.h>
|
|
#include <linux/slab.h>
|
|
|
|
#include "blk-crypto-internal.h"
|
|
|
|
/*
 * Number of bio_crypt_ctx objects preallocated in the mempool so that
 * bio crypt context allocation can make forward progress under memory
 * pressure.  Read-only module parameter (0444).
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

/* Slab cache backing all bio_crypt_ctx allocations. */
static struct kmem_cache *bio_crypt_ctx_cache;
/* Mempool over the slab cache; both are set up in bio_crypt_ctx_init(). */
static mempool_t *bio_crypt_ctx_pool;
|
|
|
|
/*
 * One-time initialization of the bio crypt context slab cache and mempool,
 * plus compile/boot-time sanity checks on the crypto mode tables.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 */
int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		return -ENOMEM;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool) {
		/* Don't leak the slab cache when the mempool can't be made. */
		kmem_cache_destroy(bio_crypt_ctx_cache);
		bio_crypt_ctx_cache = NULL;
		return -ENOMEM;
	}

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
}
|
|
|
|
/*
 * Allocate a bio crypt context from the preallocated mempool.
 *
 * @gfp_mask: allocation flags, forwarded to mempool_alloc()
 *
 * Return: the new context, or NULL if the pool is exhausted and @gfp_mask
 * does not permit waiting (mempool_alloc() semantics).
 */
struct bio_crypt_ctx *bio_crypt_alloc_ctx(gfp_t gfp_mask)
{
	struct bio_crypt_ctx *ctx;

	ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	return ctx;
}
|
|
|
|
void bio_crypt_free_ctx(struct bio *bio)
|
|
{
|
|
mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
|
|
bio->bi_crypt_context = NULL;
|
|
}
|
|
|
|
/*
 * Copy @src's crypt context (if any) onto @dst, taking an extra reference on
 * the keyslot the context currently holds.
 *
 * If a bio is fallback_crypted, then it will be decrypted when bio_endio is
 * called. As we only want the data to be decrypted once, copies of the bio
 * must not have a crypt context.
 */
void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	const struct bio_crypt_ctx *src_bc = src->bi_crypt_context;

	if (!src_bc || bio_crypt_fallback_crypted(src_bc))
		return;

	dst->bi_crypt_context = bio_crypt_alloc_ctx(gfp_mask);
	/*
	 * mempool_alloc() can return NULL when @gfp_mask doesn't allow
	 * blocking; the previous code dereferenced the result unconditionally.
	 * Bail out instead of crashing.  NOTE(review): the clone is then left
	 * without a crypt context; callers passing a non-blocking gfp_mask
	 * should be audited for how they handle that.
	 */
	if (!dst->bi_crypt_context)
		return;
	*dst->bi_crypt_context = *src_bc;

	if (src_bc->bc_keyslot >= 0)
		keyslot_manager_get_slot(src_bc->bc_ksm, src_bc->bc_keyslot);
}
EXPORT_SYMBOL_GPL(bio_crypt_clone);
|
|
|
|
bool bio_crypt_should_process(struct request *rq)
|
|
{
|
|
struct bio *bio = rq->bio;
|
|
|
|
if (!bio || !bio->bi_crypt_context)
|
|
return false;
|
|
|
|
return rq->q->ksm == bio->bi_crypt_context->bc_ksm;
|
|
}
|
|
EXPORT_SYMBOL_GPL(bio_crypt_should_process);
|
|
|
|
/*
|
|
* Checks that two bio crypt contexts are compatible - i.e. that
|
|
* they are mergeable except for data_unit_num continuity.
|
|
*/
|
|
bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2)
|
|
{
|
|
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
|
|
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
|
|
|
|
if (bc1 != bc2)
|
|
return false;
|
|
|
|
return !bc1 || bc1->bc_key == bc2->bc_key;
|
|
}
|
|
|
|
/*
|
|
* Checks that two bio crypt contexts are compatible, and also
|
|
* that their data_unit_nums are continuous (and can hence be merged)
|
|
* in the order b_1 followed by b_2.
|
|
*/
|
|
bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes,
|
|
struct bio *b_2)
|
|
{
|
|
struct bio_crypt_ctx *bc1 = b_1->bi_crypt_context;
|
|
struct bio_crypt_ctx *bc2 = b_2->bi_crypt_context;
|
|
|
|
if (!bio_crypt_ctx_compatible(b_1, b_2))
|
|
return false;
|
|
|
|
return !bc1 || bio_crypt_dun_is_contiguous(bc1, b1_bytes, bc2->bc_dun);
|
|
}
|
|
|
|
void bio_crypt_ctx_release_keyslot(struct bio_crypt_ctx *bc)
|
|
{
|
|
keyslot_manager_put_slot(bc->bc_ksm, bc->bc_keyslot);
|
|
bc->bc_ksm = NULL;
|
|
bc->bc_keyslot = -1;
|
|
}
|
|
|
|
int bio_crypt_ctx_acquire_keyslot(struct bio_crypt_ctx *bc,
|
|
struct keyslot_manager *ksm)
|
|
{
|
|
int slot = keyslot_manager_get_slot_for_key(ksm, bc->bc_key);
|
|
|
|
if (slot < 0)
|
|
return slot;
|
|
|
|
bc->bc_keyslot = slot;
|
|
bc->bc_ksm = ksm;
|
|
return 0;
|
|
}
|