perf env: Avoid recursively taking env->bpf_progs.lock

[ Upstream commit 9c51f8788b5d4e9f46afbcf563255cfd355690b3 ]

Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf()
and perf_env__find_btf prefixed with __ to indicate the
env->bpf_progs.lock is assumed held.

Call these variants when the lock is held to avoid recursively taking it
and potentially having a thread deadlock with itself.

Fixes: f8dfeae009 ("perf bpf: Show more BPF program info in print_bpf_prog_info()")
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 5 files changed, 50 insertions(+), 32 deletions(-)
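[ Editor's illustration, not part of the patch: the fix follows the usual
  convention where a "__"-prefixed variant assumes env->bpf_progs.lock is
  already held and the unprefixed wrapper takes the lock itself. Below is a
  minimal, self-contained userspace sketch of that pattern with hypothetical
  names (counter_add, counter_add_many); a pthread rwlock stands in for
  perf's struct rw_semaphore wrapper. ]

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int counter;

/* Assumes 'lock' is already held for writing by the caller. */
static void __counter_add(int v)
{
	counter += v;
}

/* Public variant: takes the lock, then delegates to the __ variant. */
static void counter_add(int v)
{
	pthread_rwlock_wrlock(&lock);
	__counter_add(v);
	pthread_rwlock_unlock(&lock);
}

/*
 * Already holds the lock across the loop, so it must call the __ variant;
 * calling counter_add() here would re-take 'lock' while it is held, which
 * is the kind of self-deadlock the patch avoids.
 */
static void counter_add_many(const int *vals, int n)
{
	pthread_rwlock_wrlock(&lock);
	for (int i = 0; i < n; i++)
		__counter_add(vals[i]);
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	int vals[] = { 1, 2, 3 };

	counter_add(10);
	counter_add_many(vals, 3);
	printf("counter = %d\n", counter);	/* prints 16 */
	return 0;
}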

diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c

@@ -442,9 +442,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
 	return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
 }
 
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-				    struct perf_env *env,
-				    FILE *fp)
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+				      struct perf_env *env,
+				      FILE *fp)
 {
 	__u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
 	__u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
@@ -460,7 +460,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
 	if (info->btf_id) {
 		struct btf_node *node;
 
-		node = perf_env__find_btf(env, info->btf_id);
+		node = __perf_env__find_btf(env, info->btf_id);
 		if (node)
 			btf = btf__new((__u8 *)(node->data),
 				       node->data_size);

diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h

@@ -34,9 +34,9 @@ struct btf_node {
 int machine__process_bpf(struct machine *machine, union perf_event *event,
 			 struct perf_sample *sample);
 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env);
-void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
-				    struct perf_env *env,
-				    FILE *fp);
+void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+				      struct perf_env *env,
+				      FILE *fp);
 #else
 static inline int machine__process_bpf(struct machine *machine __maybe_unused,
 					union perf_event *event __maybe_unused,
@@ -51,9 +51,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused,
 	return 0;
 }
 
-static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
-						   struct perf_env *env __maybe_unused,
-						   FILE *fp __maybe_unused)
+static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+						     struct perf_env *env __maybe_unused,
+						     FILE *fp __maybe_unused)
 {
 }

diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c

@@ -15,13 +15,19 @@ struct perf_env perf_env;
 
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
 				    struct bpf_prog_info_node *info_node)
 {
+	down_write(&env->bpf_progs.lock);
+	__perf_env__insert_bpf_prog_info(env, info_node);
+	up_write(&env->bpf_progs.lock);
+}
+
+void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
+{
 	__u32 prog_id = info_node->info_linear->info.id;
 	struct bpf_prog_info_node *node;
 	struct rb_node *parent = NULL;
 	struct rb_node **p;
 
-	down_write(&env->bpf_progs.lock);
 	p = &env->bpf_progs.infos.rb_node;
 
 	while (*p != NULL) {
@@ -33,15 +39,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env,
 			p = &(*p)->rb_right;
 		} else {
 			pr_debug("duplicated bpf prog info %u\n", prog_id);
-			goto out;
+			return;
 		}
 	}
 
 	rb_link_node(&info_node->rb_node, parent, p);
 	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
 	env->bpf_progs.infos_cnt++;
-out:
-	up_write(&env->bpf_progs.lock);
 }
 
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
@@ -70,14 +74,22 @@ out:
 }
 
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+	bool ret;
+
+	down_write(&env->bpf_progs.lock);
+	ret = __perf_env__insert_btf(env, btf_node);
+	up_write(&env->bpf_progs.lock);
+	return ret;
+}
+
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 {
 	struct rb_node *parent = NULL;
 	__u32 btf_id = btf_node->id;
 	struct btf_node *node;
 	struct rb_node **p;
-	bool ret = true;
 
-	down_write(&env->bpf_progs.lock);
 	p = &env->bpf_progs.btfs.rb_node;
 
 	while (*p != NULL) {
@@ -89,25 +101,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 			p = &(*p)->rb_right;
 		} else {
 			pr_debug("duplicated btf %u\n", btf_id);
-			ret = false;
-			goto out;
+			return false;
 		}
 	}
 
 	rb_link_node(&btf_node->rb_node, parent, p);
 	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
 	env->bpf_progs.btfs_cnt++;
-out:
-	up_write(&env->bpf_progs.lock);
-	return ret;
+	return true;
 }
 
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+	struct btf_node *res;
+
+	down_read(&env->bpf_progs.lock);
+	res = __perf_env__find_btf(env, btf_id);
+	up_read(&env->bpf_progs.lock);
+	return res;
+}
+
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 {
 	struct btf_node *node = NULL;
 	struct rb_node *n;
 
-	down_read(&env->bpf_progs.lock);
 	n = env->bpf_progs.btfs.rb_node;
 
 	while (n) {
@@ -117,13 +135,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
 		else if (btf_id > node->id)
 			n = n->rb_right;
 		else
-			goto out;
+			return node;
 	}
-	node = NULL;
-
-out:
-	up_read(&env->bpf_progs.lock);
-	return node;
+	return NULL;
 }
 
 /* purge data in bpf_progs.infos tree */

diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h

@@ -117,12 +117,16 @@ const char *perf_env__raw_arch(struct perf_env *env);
 int perf_env__nr_cpus_avail(struct perf_env *env);
 
 void perf_env__init(struct perf_env *env);
+void __perf_env__insert_bpf_prog_info(struct perf_env *env,
+				      struct bpf_prog_info_node *info_node);
 void perf_env__insert_bpf_prog_info(struct perf_env *env,
 				    struct bpf_prog_info_node *info_node);
 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
 							__u32 prog_id);
 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
 
 int perf_env__numa_node(struct perf_env *env, int cpu);
 #endif /* __PERF_ENV_H */

diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c

@@ -1546,8 +1546,8 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
 		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
 		next = rb_next(&node->rb_node);
 
-		bpf_event__print_bpf_prog_info(&node->info_linear->info,
-					       env, fp);
+		__bpf_event__print_bpf_prog_info(&node->info_linear->info,
+						 env, fp);
 	}
 
 	up_read(&env->bpf_progs.lock);
@@ -2724,7 +2724,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
 		/* after reading from file, translate offset to address */
 		bpf_program__bpil_offs_to_addr(info_linear);
 		info_node->info_linear = info_linear;
-		perf_env__insert_bpf_prog_info(env, info_node);
+		__perf_env__insert_bpf_prog_info(env, info_node);
 	}
 
 	up_write(&env->bpf_progs.lock);
@@ -2777,7 +2777,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
 		if (__do_read(ff, node->data, data_size))
 			goto out;
 
-		perf_env__insert_btf(env, node);
+		__perf_env__insert_btf(env, node);
 		node = NULL;
 	}