x86/kprobes: Fix __recover_optprobed_insn check optimizing logic

commit 868a6fc0ca2407622d2833adefe1c4d284766c4c upstream.

Since the following commit:

  commit f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")

modified the update timing of KPROBE_FLAG_OPTIMIZED, an optimized_kprobe
may be in either the optimizing or the unoptimizing state when op.kp->flags
has KPROBE_FLAG_OPTIMIZED set and op->list is not empty.
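
To make the state space explicit (a summary sketch; optimizing_list and
unoptimizing_list are the pending-work queues in kernel/kprobes.c):

  /*
   * With KPROBE_FLAG_OPTIMIZED set in op.kp->flags:
   *
   *   list_empty(&op->list)    - fully optimized, the jump is installed
   *   op on optimizing_list    - optimization pending, the original
   *                              instructions are still in place
   *   op on unoptimizing_list  - unoptimization pending, the jump is
   *                              still installed
   *
   * The first and third cases have a jump in the text, so only they
   * need instruction recovery in __recover_optprobed_insn().
   */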

The check logic in __recover_optprobed_insn() is therefore incorrect: a
kprobe queued for unoptimization, whose jump is still installed in the
text, may be mistaken for one that is merely queued for optimization,
whose original instructions are still in place. As a result, incorrect
instructions are copied.

The optprobe_queued_unopt() function needs to be exported so that it can
be invoked from the arch directory.
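
For reference, optprobe_queued_unopt() is a simple membership test on
unoptimizing_list, and this patch only changes its linkage. A sketch of
the existing body, with descriptive comments added:

  bool optprobe_queued_unopt(struct optimized_kprobe *op)
  {
  	struct optimized_kprobe *_op;

  	/*
  	 * @op is queued for unoptimization iff it is on unoptimizing_list;
  	 * in that case its jump is still installed in the kernel text.
  	 */
  	list_for_each_entry(_op, &unoptimizing_list, list) {
  		if (op == _op)
  			return true;
  	}

  	return false;
  }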

Link: https://lore.kernel.org/all/20230216034247.32348-2-yangjihong1@huawei.com/

Fixes: f66c0447cca1 ("kprobes: Set unoptimized flag after unoptimizing code")
Cc: stable@vger.kernel.org
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/kernel/kprobes/opt.c | 4 ++--
 include/linux/kprobes.h       | 1 +
 kernel/kprobes.c              | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -43,8 +43,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 		/* This function only handles jump-optimized kprobe */
 		if (kp && kprobe_optimized(kp)) {
 			op = container_of(kp, struct optimized_kprobe, kp);
-			/* If op->list is not empty, op is under optimizing */
-			if (list_empty(&op->list))
+			/* If op is optimized or under unoptimizing */
+			if (list_empty(&op->list) || optprobe_queued_unopt(op))
 				goto found;
 		}
 	}

--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -318,6 +318,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
					     size_t *length, loff_t *ppos);
 #endif
 extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
 #else
 static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */

--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -614,7 +614,7 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
 {
 	struct optimized_kprobe *_op;