locking/spinlocks, s390: Implement vcpu_is_preempted(cpu)
This implements the s390 version for vcpu_is_preempted(cpu), by reworking the existing smp_vcpu_scheduled() function into arch_vcpu_is_preempted(). We can then also get rid of the local cpu_is_preempted() function by moving the CIF_ENABLED_WAIT test into arch_vcpu_is_preempted().

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: linuxppc-dev@lists.ozlabs.org
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: pbonzini@redhat.com
Cc: rkrcmar@redhat.com
Cc: virtualization@lists.linux-foundation.org
Cc: will.deacon@arm.com
Cc: xen-devel-request@lists.xenproject.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1478077718-37424-6-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 760928c0da
parent 41946c8687
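For context, the point of the vcpu_is_preempted() interface is to let lock slowpaths stop busy-waiting on a lock holder whose virtual CPU has been scheduled out by the hypervisor. A minimal sketch of such a consumer, assuming only the vcpu_is_preempted() macro provided by this series (the helper name below is hypothetical and not part of this patch):

/* Hypothetical helper; in kernel code vcpu_is_preempted() is assumed to
 * come in via <linux/sched.h> or the arch spinlock header. */
static bool owner_worth_spinning_on(int owner_cpu)
{
	/* If the holder's vCPU is preempted, spinning only burns cycles;
	 * the caller should yield or block instead. */
	return !vcpu_is_preempted(owner_cpu);
}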
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
 /*
  * Simple spin lock operations. There are two variants, one clears IRQ's
  * on the local processor, one does not.
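The new #define overrides the architecture-independent default introduced elsewhere in this series; on architectures without their own helper that default simply reports "never preempted". Shown here only as an assumed sketch for context, not part of this diff:

/* Assumed generic fallback (not in this patch): */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu)	false
#endif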
@@ -368,10 +368,15 @@ int smp_find_processor_id(u16 address)
 	return -1;
 }
 
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
 {
-	return pcpu_running(pcpu_devices + cpu);
+	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+		return false;
+	if (pcpu_running(pcpu_devices + cpu))
+		return false;
+	return true;
 }
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
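The new helper folds in the old cpu_is_preempted() logic: a CPU sitting in enabled wait (CIF_ENABLED_WAIT, i.e. idle) is not considered preempted, and neither is a CPU whose backing pcpu is currently running. The same logic as above, restated as a sketch with editorial comments:

bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;	/* idle in enabled wait: nothing to yield to */
	if (pcpu_running(pcpu_devices + cpu))
		return false;	/* backing CPU is running, the holder can progress */
	return true;		/* neither idle nor running: treat as preempted */
}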
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
 	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
 }
 
-static inline int cpu_is_preempted(int cpu)
-{
-	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-		return 0;
-	if (smp_vcpu_scheduled(cpu))
-		return 0;
-	return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 			continue;
 		}
 		/* First iteration: check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
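The ~owner arguments in this and the following call sites rely on the s390 spinlock encoding (stated here as an assumption, it is not visible in this diff): the lock word stores the bitwise complement of the holder's CPU number so that CPU 0 does not collide with the unlocked value 0, and complementing it again recovers the CPU id expected by arch_vcpu_is_preempted() and smp_yield_cpu(). A sketch of that assumed encoding:

/* Assumed lock-value convention, for illustration only: */
static inline unsigned int assumed_spin_lockval(unsigned int cpu)
{
	return ~cpu;	/* lock word = ~cpu, hence ~lock_word == cpu */
}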
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 			continue;
 		}
 		/* Check if the lock owner is running. */
-		if (first_diag && cpu_is_preempted(~owner)) {
+		if (first_diag && arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 			continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		 * yield the CPU unconditionally. For LPAR rely on the
 		 * sense running status.
 		 */
-		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
 			smp_yield_cpu(~owner);
 			first_diag = 0;
 		}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 	owner = 0;
 	while (1) {
 		if (count-- <= 0) {
-			if (owner && cpu_is_preempted(~owner))
+			if (owner && arch_vcpu_is_preempted(~owner))
 				smp_yield_cpu(~owner);
 			count = spin_retry;
 		}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
 {
 	if (!cpu)
 		return;
-	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
 		return;
 	smp_yield_cpu(~cpu);
 }