locking/rwsem: Use acquire/release semantics
As of 654672d4ba ("locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations") and 6d79ef2d30 ("locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'"), weakly ordered archs can benefit from a more relaxed use of barriers when locking and unlocking, instead of regular full-barrier semantics. While currently only arm64 supports such optimizations, updating the corresponding locking primitives lets other archs benefit immediately as well, once the necessary machinery is implemented, of course.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-6-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
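
The "once the necessary machinery is implemented" caveat works because of the fallback pattern those two commits introduced: an architecture that provides only the fully ordered operation gets the _acquire/_release variants defined as aliases for it, so this patch cannot weaken ordering anywhere. A minimal sketch of that pattern, paraphrased from include/linux/atomic.h rather than quoted verbatim:

	/* Paraphrased sketch: if an arch supplies no relaxed variants,
	 * the weaker forms alias the fully ordered op and this patch is
	 * a no-op there. Only an arch such as arm64, which defines real
	 * _acquire/_release ops, gets the cheaper barriers. */
	#ifndef atomic_long_inc_return_acquire
	#define atomic_long_inc_return_acquire	atomic_long_inc_return
	#endif

	#ifndef cmpxchg_acquire
	#define cmpxchg_acquire			cmpxchg
	#endif
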
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -33,7 +33,7 @@
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
+	if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
 		rwsem_down_read_failed(sem);
 }
 
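Taking the lock only needs ACQUIRE ordering: accesses inside the critical section must not be reordered before the count increment, while accesses before the lock may sink into it. A hypothetical userspace analogue of this fastpath using C11 <stdatomic.h> (toy names, slowpath omitted; not kernel code):

	#include <stdatomic.h>

	struct toy_rwsem { atomic_long count; };

	static inline void toy_down_read(struct toy_rwsem *sem)
	{
		/* fetch_add returns the old value; old + 1 is the new count.
		 * ACQUIRE orders the critical section after the increment. */
		if (atomic_fetch_add_explicit(&sem->count, 1,
					      memory_order_acquire) + 1 <= 0) {
			/* slowpath omitted; cf. rwsem_down_read_failed() */
		}
	}
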
@@ -42,7 +42,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 	long tmp;
 
 	while ((tmp = sem->count) >= 0) {
-		if (tmp == cmpxchg(&sem->count, tmp,
+		if (tmp == cmpxchg_acquire(&sem->count, tmp,
 			   tmp + RWSEM_ACTIVE_READ_BIAS)) {
 			return 1;
 		}
@@ -57,7 +57,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	long tmp;
 
-	tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 				     (atomic_long_t *)&sem->count);
 	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 		rwsem_down_write_failed(sem);
@@ -72,7 +72,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
 		      RWSEM_ACTIVE_WRITE_BIAS);
 	return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -84,7 +84,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
+	tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
 	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 		rwsem_wake(sem);
 }
@@ -94,7 +94,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
 					    (atomic_long_t *)&sem->count) < 0))
 		rwsem_wake(sem);
 }
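The unlock side is the mirror image: __up_read() and __up_write() need RELEASE ordering, so everything done inside the critical section is visible before the count drops, pairing with the ACQUIRE taken on the lock side. Continuing the toy C11 analogue from above (hypothetical names, not kernel code):

	/* RELEASE: critical-section accesses may not sink below the
	 * subtraction; the next ACQUIRE of the lock observes them. */
	static inline void toy_up_write(struct toy_rwsem *sem, long write_bias)
	{
		if (atomic_fetch_sub_explicit(&sem->count, write_bias,
					      memory_order_release) - write_bias < 0) {
			/* slowpath omitted; cf. rwsem_wake() */
		}
	}
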
@@ -114,7 +114,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	long tmp;
 
-	tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+	/*
+	 * When downgrading from exclusive to shared ownership,
+	 * anything inside the write-locked region cannot leak
+	 * into the read side. In contrast, anything in the
+	 * read-locked region is ok to be re-ordered into the
+	 * write side. As such, rely on RELEASE semantics.
+	 */
+	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
 				     (atomic_long_t *)&sem->count);
 	if (tmp < 0)
 		rwsem_downgrade_wake(sem);
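The comment added above covers the subtle case: a downgrade keeps the lock held, so no ACQUIRE is needed. Read-side accesses may harmlessly hoist into the still-write-locked region, but write-side accesses must not sink past the count update. In the toy C11 analogue this is again a plain RELEASE (hypothetical names; the bias arithmetic only mirrors the shape of the kernel code):

	static inline void toy_downgrade_write(struct toy_rwsem *sem,
					       long waiting_bias)
	{
		/* old - waiting_bias is the new count; negative means
		 * readers are queued and must be woken. */
		if (atomic_fetch_add_explicit(&sem->count, -waiting_bias,
					      memory_order_release) - waiting_bias < 0) {
			/* cf. rwsem_downgrade_wake() */
		}
	}
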
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 	 * to reduce unnecessary expensive cmpxchg() operations.
 	 */
 	if (count == RWSEM_WAITING_BIAS &&
-	    cmpxchg(&sem->count, RWSEM_WAITING_BIAS,
+	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
 		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
 		if (!list_is_singular(&sem->wait_list))
 			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
@@ -285,7 +285,8 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	if (!(count == 0 || count == RWSEM_WAITING_BIAS))
 		return false;
 
-	old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS);
+	old = cmpxchg_acquire(&sem->count, count,
+			      count + RWSEM_ACTIVE_WRITE_BIAS);
 	if (old == count) {
 		rwsem_set_owner(sem);
 		return true;
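These two cmpxchg() sites become cmpxchg_acquire() because a successful compare-and-swap here is a lock acquisition, while a failed attempt does not take the lock and thus needs no ordering. The C11 analogue makes that distinction explicit in the two ordering arguments of compare-exchange (again toy names, not kernel code):

	static inline _Bool toy_down_write_trylock(struct toy_rwsem *sem,
						   long write_bias)
	{
		long expected = 0;	/* unlocked */

		/* ACQUIRE on success (we now hold the lock); RELAXED on
		 * failure (nothing was acquired, nothing to order). */
		return atomic_compare_exchange_strong_explicit(&sem->count,
				&expected, write_bias,
				memory_order_acquire, memory_order_relaxed);
	}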