perf/ring-buffer: Use regular variables for nesting
While the IRQ/NMI will nest, the nest-count will be invariant over the actual exception, since it will decrement equal to increment. This means we can -- carefully -- use a regular variable since the typical LOAD-STORE race doesn't exist (similar to preempt_count). This optimizes the ring-buffer for all LOAD-STORE architectures, since they need to use atomic ops to implement local_t. Suggested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Cc: acme@kernel.org Cc: mark.rutland@arm.com Cc: namhyung@kernel.org Cc: yabinc@google.com Link: http://lkml.kernel.org/r/20190517115418.481392777@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
4d839dd9e4
commit
5322ea58a0
|
@@ -24,7 +24,7 @@ struct ring_buffer {
|
|||
atomic_t poll; /* POLL_ for wakeups */
|
||||
|
||||
local_t head; /* write position */
|
||||
local_t nest; /* nested writers */
|
||||
unsigned int nest; /* nested writers */
|
||||
local_t events; /* event limit */
|
||||
local_t wakeup; /* wakeup stamp */
|
||||
local_t lost; /* nr records lost */
|
||||
|
@@ -41,7 +41,7 @@ struct ring_buffer {
|
|||
|
||||
/* AUX area */
|
||||
long aux_head;
|
||||
local_t aux_nest;
|
||||
unsigned int aux_nest;
|
||||
long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */
|
||||
unsigned long aux_pgoff;
|
||||
int aux_nr_pages;
|
||||
|
|
|
@@ -38,7 +38,12 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
|
|||
struct ring_buffer *rb = handle->rb;
|
||||
|
||||
preempt_disable();
|
||||
local_inc(&rb->nest);
|
||||
|
||||
/*
|
||||
* Avoid an explicit LOAD/STORE such that architectures with memops
|
||||
* can use them.
|
||||
*/
|
||||
(*(volatile unsigned int *)&rb->nest)++;
|
||||
handle->wakeup = local_read(&rb->wakeup);
|
||||
}
|
||||
|
||||
|
@@ -46,6 +51,17 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
|
|||
{
|
||||
struct ring_buffer *rb = handle->rb;
|
||||
unsigned long head;
|
||||
unsigned int nest;
|
||||
|
||||
/*
|
||||
* If this isn't the outermost nesting, we don't have to update
|
||||
* @rb->user_page->data_head.
|
||||
*/
|
||||
nest = READ_ONCE(rb->nest);
|
||||
if (nest > 1) {
|
||||
WRITE_ONCE(rb->nest, nest - 1);
|
||||
goto out;
|
||||
}
|
||||
|
||||
again:
|
||||
/*
|
||||
|
@@ -64,15 +80,6 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
|
|||
* load above to be stale.
|
||||
*/
|
||||
|
||||
/*
|
||||
* If this isn't the outermost nesting, we don't have to update
|
||||
* @rb->user_page->data_head.
|
||||
*/
|
||||
if (local_read(&rb->nest) > 1) {
|
||||
local_dec(&rb->nest);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since the mmap() consumer (userspace) can run on a different CPU:
|
||||
*
|
||||
|
@@ -108,7 +115,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
|
|||
* write will (temporarily) publish a stale value.
|
||||
*/
|
||||
barrier();
|
||||
local_set(&rb->nest, 0);
|
||||
WRITE_ONCE(rb->nest, 0);
|
||||
|
||||
/*
|
||||
* Ensure we decrement @rb->nest before we validate the @rb->head.
|
||||
|
@@ -116,7 +123,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
|
|||
*/
|
||||
barrier();
|
||||
if (unlikely(head != local_read(&rb->head))) {
|
||||
local_inc(&rb->nest);
|
||||
WRITE_ONCE(rb->nest, 1);
|
||||
goto again;
|
||||
}
|
||||
|
||||
|
@@ -355,6 +362,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
|
|||
struct perf_event *output_event = event;
|
||||
unsigned long aux_head, aux_tail;
|
||||
struct ring_buffer *rb;
|
||||
unsigned int nest;
|
||||
|
||||
if (output_event->parent)
|
||||
output_event = output_event->parent;
|
||||
|
@@ -385,13 +393,16 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
|
|||
if (!refcount_inc_not_zero(&rb->aux_refcount))
|
||||
goto err;
|
||||
|
||||
nest = READ_ONCE(rb->aux_nest);
|
||||
/*
|
||||
* Nesting is not supported for AUX area, make sure nested
|
||||
* writers are caught early
|
||||
*/
|
||||
if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
|
||||
if (WARN_ON_ONCE(nest))
|
||||
goto err_put;
|
||||
|
||||
WRITE_ONCE(rb->aux_nest, nest + 1);
|
||||
|
||||
aux_head = rb->aux_head;
|
||||
|
||||
handle->rb = rb;
|
||||
|
@@ -419,7 +430,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
|
|||
if (!handle->size) { /* A, matches D */
|
||||
event->pending_disable = smp_processor_id();
|
||||
perf_output_wakeup(handle);
|
||||
local_set(&rb->aux_nest, 0);
|
||||
WRITE_ONCE(rb->aux_nest, 0);
|
||||
goto err_put;
|
||||
}
|
||||
}
|
||||
|
@@ -508,7 +519,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
|
|||
|
||||
handle->event = NULL;
|
||||
|
||||
local_set(&rb->aux_nest, 0);
|
||||
WRITE_ONCE(rb->aux_nest, 0);
|
||||
/* can't be last */
|
||||
rb_free_aux(rb);
|
||||
ring_buffer_put(rb);
|
||||
|
|
Loading…
Reference in New Issue
Block a user