commit 354714dd26
Although the lock_spinning calls in the spinlock code are on the uncommon path, their presence can cause the compiler to generate many more register save/restores in the function pre/postamble, which is in the fast path. To avoid this, convert it to use the pvops callee-save calling convention, which defers all the save/restores until the actual function is called, keeping the fast path clean.

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-8-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
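In the callee-save convention, a pvop holds a pointer to an assembly thunk that saves and restores the caller-clobbered registers itself, so the call site in the spin-lock fast path stays free of spills. Below is a minimal sketch of how a hypervisor backend would hook lock_spinning this way, assuming the PV_CALLEE_SAVE_REGS_THUNK()/PV_CALLEE_SAVE() macros from <asm/paravirt.h>; the sample_* names and the empty body are illustrative placeholders, not code from this commit.

#include <linux/init.h>
#include <asm/paravirt.h>
#include <asm/spinlock_types.h>

/* Slow-path wait routine; body elided, only the registration pattern matters. */
static void sample_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	/* ... block until unlock_kick() wakes the ticket 'want' ... */
}
/*
 * Emits __raw_callee_save_sample_lock_spinning, an asm thunk that pushes and
 * pops the caller-clobbered registers around the real call, which is what
 * lets the caller treat the op as register-preserving.
 */
PV_CALLEE_SAVE_REGS_THUNK(sample_lock_spinning);

static void __init sample_init_spinlocks(void)
{
	/* Register the thunk wrapper, not the bare function pointer. */
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(sample_lock_spinning);
}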
18 lines | 373 B | C
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/paravirt.h>

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
	.unlock_kick = paravirt_nop,
#endif
};
EXPORT_SYMBOL(pv_lock_ops);
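The defaults above go through __PV_IS_CALLEE_SAVE(), which wraps a plain function pointer without generating a thunk; that is sufficient here because paravirt_nop clobbers no registers. A sketch of the relevant definitions, assuming the paravirt_types.h layout of this kernel generation (shown for orientation, verify against the tree):

/* A callee-save pvop is just a wrapped function pointer. */
struct paravirt_callee_save {
	void *func;
};

/*
 * Wrap a function that is already register-preserving, with no
 * PV_CALLEE_SAVE_REGS_THUNK() thunk in between.
 */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })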