ad70668a3d
In current mainline, the degree of access to the perf_event_open(2) system
call depends on the perf_event_paranoid sysctl. This has a number of
limitations:

1. The sysctl is only a single value. Many types of accesses are controlled
   based on that single value, making the control very limited and
   coarse-grained.

2. The sysctl is global, so if it is changed, all processes get access to
   perf_event_open(2), opening the door to security issues.

This patch adds LSM and SELinux access checking which will be used in Android
to control access to perf_event_open(2) for the purposes of attaching BPF
programs to tracepoints, perf profiling and other operations from userspace.
These operations are intended for production systems.

5 new LSM hooks are added:

1. perf_event_open: This controls access during the perf_event_open(2)
   syscall itself. The hook is called from all the places that the
   perf_event_paranoid sysctl is checked, to keep it consistent with the
   sysctl. The hook gets passed a 'type' argument which controls CPU, kernel
   and tracepoint accesses (in this context, CPU, kernel and tracepoint have
   the same semantics as the perf_event_paranoid sysctl). Additionally, I
   added an 'open' type which is similar to the perf_event_paranoid
   sysctl == 3 patch carried in Android and several other distros but
   rejected in mainline [1] in 2016.

2. perf_event_alloc: This allocates a new security object for the event
   which stores the current SID within the event. It will be useful when the
   perf event's FD is passed through IPC to another process which may try to
   read the FD. Appropriate security checks will limit access.

3. perf_event_free: Called when the event is closed.

4. perf_event_read: Called from the read(2) and mmap(2) syscalls for the
   event.

5. perf_event_write: Called from the ioctl(2) syscalls for the event.

[1] https://lwn.net/Articles/696240/

Since Peter had suggested LSM hooks in 2016 [1], I am adding his
Suggested-by tag below.

To use this patch, we set the perf_event_paranoid sysctl to -1 and then apply
SELinux checking as appropriate (default deny everything, and then add policy
rules to give access to domains that need it). In the future we can remove
the perf_event_paranoid sysctl altogether.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Co-developed-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: James Morris <jmorris@namei.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: rostedt@goodmis.org
Cc: Yonghong Song <yhs@fb.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: jeffv@google.com
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: primiano@google.com
Cc: Song Liu <songliubraving@fb.com>
Cc: rsavitski@google.com
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Matthew Garrett <matthewgarrett@google.com>
Link: https://lkml.kernel.org/r/20191014170308.70668-1-joel@joelfernandes.org
Bug: 137092007
(cherry picked from commit da97e18458fb42d7c00fac5fd1c56a3896ec666e)
[ Ryan Savitski: resolved merge conflicts with perf_event_paranoid=3 code ]
Signed-off-by: Ryan Savitski <rsavitski@google.com>
[ Ryan Savitski: Folded in upstream ae79d5588a04 (perf/core: Fix
  !CONFIG_PERF_EVENTS build warnings and failures). This should fix the
  build errors from the previous backport attempt, where certain
  configurations would end up with functions referring to the perf_event
  struct prior to its declaration (and therefore declaring it with a
  different scope). ]
Signed-off-by: Ryan Savitski <rsavitski@google.com>
Change-Id: I50769ede23fbfd8996657c6dae99cab98a3042bc
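
The listing below is kernel/trace/trace_event_perf.c, one of the call sites
converted to the new checks: the perf_allow_tracepoint() calls in
perf_trace_event_perm() replace the old direct perf_event_paranoid
comparisons. As a rough illustration of what such a helper looks like, here
is a minimal sketch modeled on the upstream da97e18458fb commit; the exact
error code, capability check and constant name are taken from that upstream
version and may differ slightly in this backport:

/*
 * Sketch only (based on the upstream commit, not necessarily verbatim from
 * this backport): the legacy sysctl check is kept, and the new LSM hook then
 * gets a chance to veto, with a 'type' argument describing which kind of
 * access (open, cpu, kernel or tracepoint) is being requested.
 */
static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}

With the sysctl set to -1, the first test never fires and the decision is
left entirely to the LSM, which is the deployment model the commit message
describes for Android: deny by default in SELinux policy, then grant the
corresponding perf_event permissions only to the domains that need them.
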
// SPDX-License-Identifier: GPL-2.0
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed to create parent,
	 * allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * event, due to issues with page faults while tracing page
		 * fault handler and its overall trickiness nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}

static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */

#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event,
		     unsigned long ref_ctr_offset, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;

	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
			    PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		return (ret == -EINVAL) ? -E2BIG : ret;
	}
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
					     ref_ctr_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * local trace_uprobe need to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false; no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false; no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, type, flags, pc);
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags = FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */