locking/core: Provide common cpu_relax_yield() definition
No need to duplicate the same define everywhere. Since the only user
is stop-machine and the only provider is s390, we can use a default
implementation of cpu_relax_yield() in sched.h.

Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-s390 <linux-s390@vger.kernel.org>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: sparclinux@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1479298985-191589-1-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 43496d3551
commit 6d0d287891
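The resulting pattern is a weak default guarded by #ifndef, which an architecture opts out of by defining the macro name to itself before the common header is processed. The following is a minimal user-space sketch of that pattern, not kernel code: the printf() bodies and the single-file layout are illustrative stand-ins for the real barrier()/yield implementations.

/*
 * Sketch only: demo of the #ifndef-default / arch-override pattern
 * used for cpu_relax_yield(). Bodies are illustrative, not taken
 * from the kernel tree.
 */
#include <stdio.h>

/* "arch header": an architecture that provides its own yield defines
 * the macro to its own name, as s390 does in this commit. */
#define cpu_relax_yield cpu_relax_yield
static void cpu_relax_yield(void)
{
	printf("arch-specific yield\n");
}

/* "common header" (the sched.h hunk): supply a default only when no
 * architecture override has been defined earlier. */
#ifndef cpu_relax_yield
#define cpu_relax_yield() printf("default yield: plain cpu_relax()\n")
#endif

int main(void)
{
	cpu_relax_yield();	/* resolves to the arch-specific version here */
	return 0;
}

With this in place the only caller, stop-machine, can invoke cpu_relax_yield() unconditionally, and only s390 pays for the hypervisor yield; every other architecture falls back to cpu_relax().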
@@ -58,7 +58,6 @@ unsigned long get_wchan(struct task_struct *p);
   ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW

@@ -60,15 +60,12 @@ struct task_struct;
 #ifndef CONFIG_EZNPS_MTM_EXT
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #else
 
 #define cpu_relax()     \
	__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
 
-#define cpu_relax_yield() cpu_relax()
-
 #endif
 
 #define copy_segments(tsk, mm)      do { } while (0)

@@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()			barrier()
 #endif
 
-#define cpu_relax_yield() cpu_relax()
-
 #define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 

@@ -149,8 +149,6 @@ static inline void cpu_relax(void)
	asm volatile("yield" ::: "memory");
 }
 
-#define cpu_relax_yield() cpu_relax()
-
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

@@ -92,7 +92,6 @@ extern struct avr32_cpuinfo boot_cpu_data;
 #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
 
 #define cpu_relax()		barrier()
-#define cpu_relax_yield() cpu_relax()
 #define cpu_sync_pipeline()	asm volatile("sub pc, -2" : : : "memory")
 
 struct cpu_context {

@@ -92,7 +92,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax()	smp_mb()
-#define cpu_relax_yield() cpu_relax()
 
 /* Get the Silicon Revision of the chip */
 static inline uint32_t __pure bfin_revid(void)

@@ -121,7 +121,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(task)	(task_pt_regs(task)->sp)
 
 #define cpu_relax()		do { } while (0)
-#define cpu_relax_yield() cpu_relax()
 
 extern const struct seq_operations cpuinfo_op;
 

@@ -63,7 +63,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
-#define cpu_relax_yield() cpu_relax()
 
 void default_idle(void);
 

@@ -107,7 +107,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk)->thread.frame0->sp)
 
 #define cpu_relax() barrier()
-#define cpu_relax_yield() cpu_relax()
 
 /* data cache prefetch */
 #define ARCH_HAS_PREFETCH

@@ -127,7 +127,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax()    barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #define HARD_RESET_NOW() ({		\
	local_irq_disable();		\

@@ -56,7 +56,6 @@ struct thread_struct {
 }
 
 #define cpu_relax() __vmyield()
-#define cpu_relax_yield() cpu_relax()
 
 /*
  * Decides where the kernel will search for a free chunk of vm space during

@@ -547,7 +547,6 @@ ia64_eoi (void)
 }
 
 #define cpu_relax()	ia64_hint(ia64_hint_pause)
-#define cpu_relax_yield() cpu_relax()
 
 static inline int
 ia64_get_irr(unsigned int vector)

@@ -133,6 +133,5 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #endif /* _ASM_M32R_PROCESSOR_H */

@@ -156,6 +156,5 @@ unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk)	((struct pt_regs *) ((tsk)->thread.esp0))
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #endif

@@ -152,7 +152,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
 #define cpu_relax()     barrier()
-#define cpu_relax_yield() cpu_relax()
 
 extern void setup_priv(void);
 

@@ -22,7 +22,6 @@
 extern const struct seq_operations cpuinfo_op;
 
 # define cpu_relax()		barrier()
-# define cpu_relax_yield() cpu_relax()
 
 #define task_pt_regs(tsk) \
		(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)

@@ -389,7 +389,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 /*
  * Return_address is a replacement for __builtin_return_address(count)

@@ -69,7 +69,6 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
 extern void dodgy_tsc(void);
 
 #define cpu_relax() barrier()
-#define cpu_relax_yield() cpu_relax()
 
 /*
  * User space process size: 1.75GB (default).

@@ -88,7 +88,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk)->thread.kregs->sp)
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #endif /* __ASSEMBLY__ */
 

@@ -92,7 +92,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_OPENRISC_PROCESSOR_H */

@@ -309,7 +309,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)	((tsk)->thread.regs.gr[30])
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 /*
  * parisc_requires_coherency() is used to identify the combined VIPT/PIPT

@@ -404,8 +404,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 #define cpu_relax()	barrier()
 #endif
 
-#define cpu_relax_yield() cpu_relax()
-
 /* Check that a certain kernel stack pointer is valid in task_struct p */
 int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

@@ -234,6 +234,7 @@ static inline unsigned short stap(void)
 /*
  * Give up the time slice of the virtual PU.
  */
+#define cpu_relax_yield cpu_relax_yield
 void cpu_relax_yield(void);
 
 #define cpu_relax() barrier()

@@ -24,7 +24,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define current_text_addr() ({ __label__ _l; _l: &&_l; })
 
 #define cpu_relax()		barrier()
-#define cpu_relax_yield() cpu_relax()
 #define release_thread(thread)	do {} while (0)
 
 /*

@@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
 
 #define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 void default_idle(void);
 void stop_this_cpu(void *);

@@ -119,7 +119,6 @@ extern struct task_struct *last_task_used_math;
 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax()	barrier()
-#define cpu_relax_yield() cpu_relax()
 
 extern void (*sparc_idle)(void);
 

@@ -216,7 +216,6 @@ unsigned long get_wchan(struct task_struct *task);
				     "nop\n\t"				\
				     ".previous"			\
				     ::: "memory")
-#define cpu_relax_yield() cpu_relax()
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has

@@ -264,8 +264,6 @@ static inline void cpu_relax(void)
	barrier();
 }
 
-#define cpu_relax_yield() cpu_relax()
-
 /* Info on this processor (see fs/proc/cpuinfo.c) */
 struct seq_operations;
 extern const struct seq_operations cpuinfo_op;

@@ -71,7 +71,6 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()			barrier()
-#define cpu_relax_yield() cpu_relax()
 
 #define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

@@ -588,8 +588,6 @@ static __always_inline void cpu_relax(void)
	rep_nop();
 }
 
-#define cpu_relax_yield() cpu_relax()
-
 /* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {

@@ -26,7 +26,6 @@ static inline void rep_nop(void)
 }
 
 #define cpu_relax()	rep_nop()
-#define cpu_relax_yield() cpu_relax()
 
 #define task_pt_regs(t) (&(t)->thread.regs)
 

@@ -206,7 +206,6 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax()  barrier()
-#define cpu_relax_yield() cpu_relax()
 
 /* Special register access. */
 

@@ -2444,6 +2444,10 @@ static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+#ifndef cpu_relax_yield
+#define cpu_relax_yield() cpu_relax()
+#endif
+
 /*
  * Do not use outside of architecture code which knows its limitations.
  *