kernel_optimize_test/include/asm-generic/preempt.h
Peter Zijlstra 609ca06638 sched/core: Create preempt_count invariant
Assuming units of PREEMPT_DISABLE_OFFSET for preempt_count() numbers.

Now that TASK_DEAD no longer results in preempt_count() == 3 during
scheduling, we will always call context_switch() with preempt_count()
== 2.

However, we don't always end up with preempt_count() == 2 in
finish_task_switch() because new tasks get created with
preempt_count() == 1.

Create FORK_PREEMPT_COUNT and set it to 2 and use that in the right
places. Note that we cannot use INIT_PREEMPT_COUNT as that serves
another purpose (boot).

After this, preempt_count() is invariant across the context switch,
with exception of PREEMPT_ACTIVE.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-10-06 17:08:14 +02:00
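The commit message describes the invariant in units of PREEMPT_DISABLE_OFFSET. The snippet below is a minimal, standalone sketch of that arithmetic (userspace, not kernel code): the PREEMPT_DISABLE_OFFSET value of 1, the exact expansion of FORK_PREEMPT_COUNT, and the main() harness are illustrative assumptions; the two decrements stand in for finish_task_switch() and the preempt_enable() that follows it.

#include <assert.h>
#include <stdio.h>

#define PREEMPT_ENABLED		0
#define PREEMPT_DISABLE_OFFSET	1	/* assumed: one unit per preempt_disable() */
/*
 * Fork-time value as described above: two disable offsets, so a newly
 * forked task sees the same count in finish_task_switch() as a task
 * that was switched out by schedule().
 */
#define FORK_PREEMPT_COUNT	(2 * PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

int main(void)
{
	int count = FORK_PREEMPT_COUNT;		/* what a new task starts with */

	count -= PREEMPT_DISABLE_OFFSET;	/* finish_task_switch() drops one level */
	count -= PREEMPT_DISABLE_OFFSET;	/* the following preempt_enable() drops the other */

	assert(count == PREEMPT_ENABLED);	/* invariant: back to fully enabled */
	printf("preempt_count after switch-in: %d\n", count);
	return 0;
}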


#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED	(0)
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}
/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
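/*
 * Sketch of the intended callers (an assumption, not spelled out in this
 * header): sched_fork() is expected to use init_task_preempt_count() so a
 * forked task enters finish_task_switch() with the elevated
 * FORK_PREEMPT_COUNT described in the commit above, while init_idle() is
 * expected to use init_idle_preempt_count() to start the idle threads
 * with preemption fully enabled.
 */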
/*
 * The generic implementation does not fold the need-resched flag into
 * the preempt_count word (see __preempt_count_dec_and_test() below),
 * so these are no-ops and TIF_NEED_RESCHED is tested separately.
 */
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}
/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}
static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here because it
	 * might get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}
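/*
 * Usage sketch (an assumption, simplified from the CONFIG_PREEMPT flavour
 * of preempt_enable() in <linux/preempt.h>):
 *
 *	#define preempt_enable() \
 *	do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 *
 * i.e. the final enable both drops the count and checks whether a
 * reschedule was requested while preemption was off.
 */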
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}
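/*
 * Usage sketch (an assumption): callers pass the preempt_count they expect
 * to be holding at the call site, e.g. cond_resched() checks
 * should_resched(0) while cond_resched_lock() checks
 * should_resched(PREEMPT_LOCK_OFFSET), so a voluntary reschedule only
 * happens when nothing else is still disabling preemption.
 */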
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */