locking/mcs: Micro-optimize the MCS code, add extra comments
Remove the unnecessary operation of assigning locked status 1 when the lock is acquired without contention. The lock holder never checks its own lock status again once the lock is acquired, and lock contenders never look at the lock holder's lock status either.

Mark the cmpxchg(lock, node, NULL) == node check in mcs_spin_unlock() as likely(), since in the common case no new waiter has raced in between reading node->next and the cmpxchg.

Also add more comments describing how the local node is used in MCS locks.

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jason Low <jason.low2@hp.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1390347365.3138.64.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 5faeb8adb9
parent e72246748f
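The comments added below spell out the calling convention: the lock is a single tail pointer, and each contender supplies its own local queue node. A minimal caller-side sketch of that convention (illustrative only, not part of this patch; demo_lock and demo_critical_section are hypothetical names):

/*
 * Caller-side sketch: the lock is a tail pointer, NULL when free.
 * Each acquisition passes its own node, typically on the stack.
 */
struct mcs_spinlock *demo_lock;			/* hypothetical lock */

static void demo_critical_section(void)
{
	struct mcs_spinlock node;		/* local node, one per acquisition */

	mcs_spin_lock(&demo_lock, &node);
	/* critical section: node.locked is never re-read by the holder */
	mcs_spin_unlock(&demo_lock, &node);
}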
@@ -25,6 +25,17 @@ struct mcs_spinlock {
  * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
  * used after mcs_lock.
  */
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference of the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
+ *
+ * We don't inline mcs_spin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
 static inline
 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
@@ -36,8 +47,14 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 
 	prev = xchg(lock, node);
 	if (likely(prev == NULL)) {
-		/* Lock acquired */
-		node->locked = 1;
+		/*
+		 * Lock acquired, don't need to set node->locked to 1. Threads
+		 * only spin on its own node->locked value for lock acquisition.
+		 * However, since this thread can immediately acquire the lock
+		 * and does not proceed to spin on its own node->locked, this
+		 * value won't be used. If a debug mode is needed to
+		 * audit lock status, then set node->locked value here.
+		 */
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
@@ -50,6 +67,10 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 		arch_mutex_cpu_relax();
 }
 
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
 static inline
 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 {
@@ -59,7 +80,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	/*
 	 * Release the lock by setting it to NULL
 	 */
-	if (cmpxchg(lock, node, NULL) == node)
+	if (likely(cmpxchg(lock, node, NULL) == node))
 		return;
 	/* Wait until the next pointer is set */
 	while (!(next = ACCESS_ONCE(node->next)))
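To make both changes concrete, here is a minimal userspace sketch of the same MCS scheme. It is an illustration under stated assumptions, not the kernel implementation: GCC/Clang __atomic builtins stand in for the kernel's xchg()/cmpxchg()/ACCESS_ONCE(), and an empty busy loop stands in for arch_mutex_cpu_relax(). It mirrors the two fast paths touched above: the uncontended lock path skips the node->locked store, and the unlock path treats a successful cmpxchg as the common case.

#include <stdbool.h>
#include <stddef.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked;	/* 1 once the previous holder hands the lock over */
};

static void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	node->next = NULL;
	node->locked = 0;

	/* Atomically make ourselves the new tail of the waiter queue. */
	prev = __atomic_exchange_n(lock, node, __ATOMIC_ACQ_REL);
	if (prev == NULL) {
		/*
		 * Uncontended fast path: as the patch notes, there is no
		 * need to set node->locked to 1 here, since nobody will
		 * ever read the holder's own locked field.
		 */
		return;
	}

	/* Link behind the old tail, then spin on our own node only. */
	__atomic_store_n(&prev->next, node, __ATOMIC_RELEASE);
	while (!__atomic_load_n(&node->locked, __ATOMIC_ACQUIRE))
		;	/* arch_mutex_cpu_relax() equivalent would go here */
}

static void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE);

	if (next == NULL) {
		struct mcs_spinlock *expected = node;

		/*
		 * Likely case: no successor yet, so release the lock by
		 * swinging the tail pointer back to NULL.
		 */
		if (__atomic_compare_exchange_n(lock, &expected, NULL, false,
						__ATOMIC_RELEASE, __ATOMIC_RELAXED))
			return;
		/* A new waiter raced in; wait until it links itself. */
		while (!(next = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE)))
			;
	}
	/* Hand the lock to the successor by setting its locked flag. */
	__atomic_store_n(&next->locked, 1, __ATOMIC_RELEASE);
}

Because each waiter spins only on its own node->locked cache line, contenders do not bounce the lock word between CPUs, which is the property that makes MCS locks scale better than a plain test-and-set spinlock.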