tracing: Use CONFIG_PREEMPTION
CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the conditionals in the tracer over to CONFIG_PREEMPTION.

This is the first step to make the tracer work on RT. The other small
tweaks are submitted separately.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.409766323@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
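For reference, the Kconfig relationship the first paragraph relies on looks roughly like this. This is a minimal sketch abridged from kernel/Kconfig.preempt as introduced by this series; prompt strings and dependencies are trimmed, so treat it as illustrative rather than verbatim:

    # Abridged sketch, not the verbatim kernel/Kconfig.preempt.
    config PREEMPT
            bool "Preemptible Kernel (Low-Latency Desktop)"
            select PREEMPTION

    config PREEMPT_RT
            bool "Fully Preemptible Kernel (Real-Time)"
            select PREEMPTION

    # Hidden common symbol: both preemption models select it, so code that
    # must behave the same under either model tests CONFIG_PREEMPTION
    # rather than CONFIG_PREEMPT.
    config PREEMPTION
            bool
            select PREEMPT_COUNT

Because both models select the hidden PREEMPTION symbol, switching the tracer's conditionals from CONFIG_PREEMPT to CONFIG_PREEMPTION (the hunks below) makes them cover PREEMPT and PREEMPT_RT builds alike.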
parent 27972765bd
commit 30c937043b
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -179,7 +179,7 @@ config TRACE_PREEMPT_TOGGLE
 config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
-	select TRACE_PREEMPT_TOGGLE if PREEMPT
+	select TRACE_PREEMPT_TOGGLE if PREEMPTION
 	select GENERIC_TRACER
 	default n
 	help
@@ -214,7 +214,7 @@ config PREEMPT_TRACER
 	bool "Preemption-off Latency Tracer"
 	default n
 	depends on !ARCH_USES_GETTIMEOFFSET
-	depends on PREEMPT
+	depends on PREEMPTION
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
 	select RING_BUFFER_ALLOW_SWAP
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2814,7 +2814,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		 * synchornize_rcu_tasks() will wait for those tasks to
 		 * execute and either schedule voluntarily or enter user space.
 		 */
-		if (IS_ENABLED(CONFIG_PREEMPT))
+		if (IS_ENABLED(CONFIG_PREEMPTION))
 			synchronize_rcu_tasks();
 
  free_ops:
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -267,7 +267,7 @@ static void ring_buffer_producer(void)
 		if (consumer && !(cnt % wakeup_interval))
 			wake_up_process(consumer);
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 		/*
 		 * If we are a non preempt kernel, the 10 second run will
 		 * stop everything while it runs. Instead, we will call
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -255,12 +255,12 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 	local_save_flags(fbuffer->flags);
 	fbuffer->pc = preempt_count();
 	/*
-	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
+	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
 	 * preemption (adding one to the preempt_count). Since we are
 	 * interested in the preempt_count at the time the tracepoint was
 	 * hit, we need to subtract one to offset the increment.
 	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 