tracing: Fix a possible race when disabling buffered events
commit c0591b1cccf708a47bc465c62436d669a4213323 upstream.
Function trace_buffered_event_disable() is responsible for freeing pages
backing buffered events, and this process can run concurrently with
trace_event_buffer_lock_reserve().
The following race is currently possible:
* Function trace_buffered_event_disable() is called on CPU 0. It
increments trace_buffered_event_cnt on each CPU and waits via
synchronize_rcu() for each user of trace_buffered_event to complete.
* After synchronize_rcu() is finished, function
trace_buffered_event_disable() has exclusive access to
trace_buffered_event. All counters trace_buffered_event_cnt are at 1
and all pointers trace_buffered_event are still valid.
* At this point, on a different CPU 1, the execution reaches
trace_event_buffer_lock_reserve(). The function calls
preempt_disable_notrace() and only now enters an RCU read-side
critical section. The function proceeds and reads a still-valid
pointer from trace_buffered_event[CPU1] into the local variable
"entry". However, it doesn't yet read trace_buffered_event_cnt[CPU1],
which happens later.
* Function trace_buffered_event_disable() continues. It frees
trace_buffered_event[CPU1] and decrements
trace_buffered_event_cnt[CPU1] back to 0.
* Function trace_event_buffer_lock_reserve() continues. It reads and
increments trace_buffered_event_cnt[CPU1] from 0 to 1. This makes it
believe that it can use the "entry" that it already obtained, but the
pointer is now invalid and any access results in a use-after-free (see
the sketch after this list).
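To make the window concrete, the reader side looks roughly as follows.
This is a condensed sketch, not the verbatim kernel source: the length
checks and the ring-buffer fallback are elided, and the local variable
names ("entry", "val") are assumptions for illustration.

    preempt_disable_notrace();      /* RCU read-side section starts only here */

    entry = __this_cpu_read(trace_buffered_event);  /* pointer is still valid */
    if (entry) {
            /*
             * Window: trace_buffered_event_disable() on CPU 0 can run to
             * completion here, freeing the page behind "entry" and
             * resetting trace_buffered_event_cnt[CPU1] back to 0.
             */
            val = this_cpu_inc_return(trace_buffered_event_cnt);  /* reads 0, writes 1 */
            if (val == 1) {
                    /* "entry" is used although its backing page is already freed */
                    ...
            }
    }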
Fix the problem by making a second synchronize_rcu() call after all
trace_buffered_event values are set to NULL. This waits on all potential
users in trace_event_buffer_lock_reserve() that still read a previous
pointer from trace_buffered_event.
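With this change, the teardown in trace_buffered_event_disable() pairs two
synchronize_rcu() calls with the preempt-disabled section sketched above.
A condensed outline of the resulting order is shown here for context; it
assumes the surrounding code matches the upstream function after the
earlier patches in this series, and only the hunk below is part of this
patch:

    /* For each CPU, set the buffer as used (trace_buffered_event_cnt > 0). */
    on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
                     NULL, true);

    /* Wait for all current users to finish. */
    synchronize_rcu();

    for_each_tracing_cpu(cpu) {
            free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
            per_cpu(trace_buffered_event, cpu) = NULL;
    }

    /*
     * New: wait also for readers that already loaded the old pointer but
     * have not yet read trace_buffered_event_cnt.
     */
    synchronize_rcu();

    /* For each CPU, relinquish the buffer (cnt back to 0). */
    on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
                     true);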
Link: https://lore.kernel.org/all/20231127151248.7232-2-petr.pavlu@suse.com/
Link: https://lkml.kernel.org/r/20231205161736.19663-4-petr.pavlu@suse.com
Cc: stable@vger.kernel.org
Fixes: 0fc1b09ff1 ("tracing: Use temp buffer when filtering events")
Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -2501,13 +2501,17 @@ void trace_buffered_event_disable(void)
                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
                 per_cpu(trace_buffered_event, cpu) = NULL;
         }
-        /*
-         * Make sure trace_buffered_event is NULL before clearing
-         * trace_buffered_event_cnt.
-         */
-        smp_wmb();
 
-        /* Do the work on each cpu */
+        /*
+         * Wait for all CPUs that potentially started checking if they can use
+         * their event buffer only after the previous synchronize_rcu() call and
+         * they still read a valid pointer from trace_buffered_event. It must be
+         * ensured they don't see cleared trace_buffered_event_cnt else they
+         * could wrongly decide to use the pointed-to buffer which is now freed.
+         */
+        synchronize_rcu();
+
+        /* For each CPU, relinquish the buffer */
         on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
                          true);
 }