Reapply "ANDROID: Revert "tracing/ring-buffer: Have polling block on watermark""

This reverts commit 541686ad47d2b3eb58a3d809322fe558b3b2c948, reapplying
the original revert, as the original change is still causing crashes.

Bug: 263508491
Cc: Lee Jones <joneslee@google.com>
Change-Id: I35cd4d9cef24e64f2dc3afad85d90d588c09e5f0
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-10-24 13:32:28 +00:00
parent 574430d8ef
commit 8a59cb3011
3 changed files with 21 additions and 37 deletions

include/linux/ring_buffer.h

@@ -99,7 +99,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
-			  struct file *filp, poll_table *poll_table, int full);
+			  struct file *filp, poll_table *poll_table);
 
 #define RING_BUFFER_ALL_CPUS -1

kernel/trace/ring_buffer.c

@@ -569,21 +569,6 @@ size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
 	return cnt - read;
 }
 
-static __always_inline bool full_hit(struct ring_buffer *buffer, int cpu, int full)
-{
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
-	size_t nr_pages;
-	size_t dirty;
-
-	nr_pages = cpu_buffer->nr_pages;
-	if (!nr_pages || !full)
-		return true;
-
-	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
-
-	return (dirty * 100) > (full * nr_pages);
-}
-
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
@@ -679,20 +664,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 		       !ring_buffer_empty_cpu(buffer, cpu)) {
 			unsigned long flags;
 			bool pagebusy;
-			bool done;
+			size_t nr_pages;
+			size_t dirty;
 
 			if (!full)
 				break;
 
 			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
-			done = !pagebusy && full_hit(buffer, cpu, full);
-
+			nr_pages = cpu_buffer->nr_pages;
+			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
 			if (!cpu_buffer->shortest_full ||
 			    cpu_buffer->shortest_full > full)
 				cpu_buffer->shortest_full = full;
 			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-			if (done)
+			if (!pagebusy &&
+			    (!nr_pages || (dirty * 100) > full * nr_pages))
 				break;
 		}
 
@@ -713,7 +700,6 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
  * @cpu: the cpu buffer to wait on
  * @filp: the file descriptor
  * @poll_table: The poll descriptor
- * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
  *
  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
  * as data is added to any of the @buffer's cpu buffers. Otherwise
@@ -723,14 +709,14 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
-			  struct file *filp, poll_table *poll_table, int full)
+			  struct file *filp, poll_table *poll_table)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct rb_irq_work *work;
 
-	if (cpu == RING_BUFFER_ALL_CPUS) {
+	if (cpu == RING_BUFFER_ALL_CPUS)
 		work = &buffer->irq_work;
-	} else {
+	else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -EINVAL;
 
@@ -738,14 +724,8 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 		work = &cpu_buffer->irq_work;
 	}
 
-	if (full) {
-		poll_wait(filp, &work->full_waiters, poll_table);
-		work->full_waiters_pending = true;
-	} else {
-		poll_wait(filp, &work->waiters, poll_table);
-		work->waiters_pending = true;
-	}
-
+	poll_wait(filp, &work->waiters, poll_table);
+	work->waiters_pending = true;
 	/*
 	 * There's a tight race between setting the waiters_pending and
 	 * checking if the ring buffer is empty. Once the waiters_pending bit
@@ -761,9 +741,6 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 	 */
 	smp_mb();
 
-	if (full)
-		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
-
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 		return EPOLLIN | EPOLLRDNORM;
@@ -2674,6 +2651,10 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
+	size_t nr_pages;
+	size_t dirty;
+	size_t full;
+
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
@@ -2697,7 +2678,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 
 	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
 
-	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
+	full = cpu_buffer->shortest_full;
+	nr_pages = cpu_buffer->nr_pages;
+	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
 		return;
 
 	cpu_buffer->irq_work.wakeup_full = true;
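
For reference, the watermark arithmetic used on both sides of this diff (the
removed full_hit() helper and the open-coded checks that replace it) is plain
percentage math: a waiter is only considered satisfied once more than "full"
percent of the per-cpu pages are dirty. A minimal userspace sketch of that
check, with nr_pages, dirty and full passed in directly since the real code
reads them from the per-cpu buffer:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Same condition as full_hit(): true once dirty pages exceed the "full"
 * percentage of the buffer, or when no watermark is requested at all. */
static bool full_hit(size_t nr_pages, size_t dirty, int full)
{
	if (!nr_pages || !full)
		return true;
	return (dirty * 100) > ((size_t)full * nr_pages);
}

int main(void)
{
	/* Example: a 128-page buffer with a 50% watermark needs more than 64 dirty pages. */
	printf("%d\n", full_hit(128, 64, 50));	/* 0: exactly at the watermark, not over it */
	printf("%d\n", full_hit(128, 65, 50));	/* 1: watermark exceeded */
	return 0;
}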

kernel/trace/trace.c

@@ -6036,7 +6036,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
 		return EPOLLIN | EPOLLRDNORM;
 	else
 		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
-					     filp, poll_table, iter->tr->buffer_percent);
+					     filp, poll_table);
 }
 
 static __poll_t
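
The trace_poll() hunk above is what drops iter->tr->buffer_percent from the
poll path, so after this revert poll() on a trace pipe reports readable as
soon as any data is buffered rather than blocking until the watermark is
reached. A minimal consumer sketch of that behavior (assuming tracefs is
mounted at /sys/kernel/tracing; the cpu0 path and page-sized read are
illustrative):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Per-cpu raw ring-buffer pipe exposed by tracefs. */
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char page[4096];

	/* Block until the kernel reports readable data, then drain one page.
	 * With this revert, readiness no longer depends on buffer_percent. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, page, sizeof(page));
		printf("read %zd bytes of ring-buffer data\n", n);
	}

	close(fd);
	return 0;
}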