ANDROID: bpf: validate bpf_func when BPF_JIT is enabled with CFI
With CONFIG_BPF_JIT, the kernel makes indirect calls to dynamically generated code, which the compile-time Control-Flow Integrity (CFI) checking cannot validate. This change adds basic sanity checking to ensure we are jumping to a valid location, which narrows down the attack surface on the stored pointer. In addition, this change adds a weak arch_bpf_jit_check_func function, which architectures that implement BPF JIT can override to perform additional validation, such as verifying that the pointer points to the correct memory region.

Bug: 145210207
Change-Id: I1a90c70cdcef25673a870d3c4f2586a829c0d32e
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
This commit is contained in:
parent
d71a92631c
commit
ff5bf35998
@ -511,7 +511,12 @@ struct sock_fprog_kern {
|
||||
struct sock_filter *filter;
|
||||
};
|
||||
|
||||
#define BPF_BINARY_HEADER_MAGIC 0x05de0e82
|
||||
|
||||
struct bpf_binary_header {
|
||||
#ifdef CONFIG_CFI_CLANG
|
||||
u32 magic;
|
||||
#endif
|
||||
u32 pages;
|
||||
/* Some arches need word alignment for their instructions */
|
||||
u8 image[] __aligned(4);
|
||||
@ -553,20 +558,75 @@ struct sk_filter {
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
|
||||
|
||||
#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
/*
 * With JIT, the kernel makes an indirect call to dynamically generated
 * code. Use bpf_call_func to perform additional validation of the call
 * target to narrow down attack surface. Architectures implementing BPF
 * JIT can override arch_bpf_jit_check_func for arch-specific checking.
 */
extern bool arch_bpf_jit_check_func(const struct bpf_prog *prog);

/*
 * Interpreter path: bpf_func points at compiled-in kernel code, so the
 * normal compile-time CFI check applies to this indirect call.
 */
static inline unsigned int __bpf_call_func(const struct bpf_prog *prog,
					   const void *ctx)
{
	/* Call interpreter with CFI checking. */
	return prog->bpf_func(ctx, prog->insnsi);
}

/* Forward declaration; defined later in this header. */
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp);

/*
 * __nocfi: the target is JIT-generated and unknown to the compile-time
 * CFI tables, so CFI is disabled for this call and the target is
 * validated manually via the header magic and arch hook below.
 */
static inline unsigned int __nocfi bpf_call_func(const struct bpf_prog *prog,
						 const void *ctx)
{
	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);

	/* Not JITed: fall back to the CFI-checked interpreter call. */
	if (!IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) && !prog->jited)
		return __bpf_call_func(prog, ctx);

	/*
	 * We are about to call dynamically generated code. Check that the
	 * page has bpf_binary_header with a valid magic to limit possible
	 * call targets.
	 */
	BUG_ON(hdr->magic != BPF_BINARY_HEADER_MAGIC ||
	       !arch_bpf_jit_check_func(prog));

	/* Call jited function without CFI checking. */
	return prog->bpf_func(ctx, prog->insnsi);
}

/* Stamp a freshly allocated JIT image header with the magic value. */
static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
{
	hdr->magic = BPF_BINARY_HEADER_MAGIC;
}
#else
/* No JIT or no CFI: a plain indirect call needs no extra validation. */
static inline unsigned int bpf_call_func(const struct bpf_prog *prog,
					 const void *ctx)
{
	return prog->bpf_func(ctx, prog->insnsi);
}

/* No magic field exists without CONFIG_CFI_CLANG; nothing to do. */
static inline void bpf_jit_set_header_magic(struct bpf_binary_header *hdr)
{
}
#endif
|
||||
|
||||
/*
 * Run a BPF program on @ctx and yield its u32 return value.
 *
 * All invocations go through bpf_call_func() so the CFI call-target
 * validation (when enabled) is applied uniformly; the previous direct
 * (*(prog)->bpf_func)(...) call left over from before the CFI change is
 * removed — keeping both would run the program twice per invocation.
 *
 * When the static key bpf_stats_enabled_key is on, per-CPU run count and
 * cumulative runtime (sched_clock() delta) are accumulated under the
 * u64_stats seqlock.
 */
#define BPF_PROG_RUN(prog, ctx) ({					\
	u32 ret;							\
	cant_sleep();							\
	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
		struct bpf_prog_stats *stats;				\
		u64 start = sched_clock();				\
		ret = bpf_call_func(prog, ctx);				\
		stats = this_cpu_ptr(prog->aux->stats);			\
		u64_stats_update_begin(&stats->syncp);			\
		stats->cnt++;						\
		stats->nsecs += sched_clock() - start;			\
		u64_stats_update_end(&stats->syncp);			\
	} else {							\
		ret = bpf_call_func(prog, ctx);				\
	}								\
	ret; })
|
||||
|
||||
|
@ -792,6 +792,14 @@ void __weak bpf_jit_free_exec(void *addr)
|
||||
module_memfree(addr);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_BPF_JIT) && IS_ENABLED(CONFIG_CFI_CLANG)
/*
 * Default arch hook for validating a JITed call target before the
 * CFI-exempt indirect call in bpf_call_func(). Architectures that
 * implement BPF JIT can override this (it is declared __weak) to add
 * checks such as verifying that prog->bpf_func points into the arch's
 * JIT memory region. The default accepts every target, leaving only
 * the header-magic check in bpf_call_func().
 */
bool __weak arch_bpf_jit_check_func(const struct bpf_prog *prog)
{
	return true;
}
EXPORT_SYMBOL(arch_bpf_jit_check_func);
#endif
|
||||
|
||||
struct bpf_binary_header *
|
||||
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
|
||||
unsigned int alignment,
|
||||
@ -818,6 +826,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
|
||||
/* Fill space with illegal/arch-dep instructions. */
|
||||
bpf_fill_ill_insns(hdr, size);
|
||||
|
||||
bpf_jit_set_header_magic(hdr);
|
||||
hdr->pages = pages;
|
||||
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
|
||||
PAGE_SIZE - sizeof(*hdr));
|
||||
|
Loading…
Reference in New Issue
Block a user