locking/ww_mutex: Notify waiters that have to back off while adding tasks to wait list
While adding our task as a waiter, detect if another task should back off
because of us.

With this patch, we establish the invariant that the wait list contains
at most one (sleeping) waiter with ww_ctx->acquired > 0, and this waiter
will be the first waiter with a context.

Since only waiters with ww_ctx->acquired > 0 have to back off, this allows
us to be much more economical with wakeups.

Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-8-git-send-email-nhaehnle@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6baa5c60a9
commit 200b187440
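For context, here is a minimal caller-side sketch of the wait/wound pattern that the invariant above serves. It is not part of the patch: lock_pair() and sketch_ww_class are made-up names, while the calls themselves are the stock ww_mutex API (see Documentation/locking/ww-mutex-design.rst). Only a task that already holds locks (ww_ctx->acquired > 0) can be told to back off via -EDEADLK, which is why at most one such sleeper per wait list ever needs waking.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(sketch_ww_class);

/* Take two ww_mutexes in either order; back off and retry on -EDEADLK. */
static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &sketch_ww_class);

	ww_mutex_lock(a, &ctx);		/* acquired == 0: may sleep, cannot deadlock */
	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/*
		 * acquired > 0 and an older context owns b: we are the
		 * waiter that has to back off and release what we hold.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);	/* sleep until the older context is done */
		swap(a, b);			/* retry, with the contended lock held first */
	}
	ww_acquire_done(&ctx);

	/* ... both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}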
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -596,23 +596,34 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+			    struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+	struct mutex_waiter *cur;
 
-	if (!hold_ctx)
-		return 0;
+	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
+		goto deadlock;
 
-	if (__ww_ctx_stamp_after(ctx, hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-		ctx->contending_lock = ww;
-#endif
-		return -EDEADLK;
-	}
+	/*
+	 * If there is a waiter in front of us that has a context, then its
+	 * stamp is earlier than ours and we must back off.
+	 */
+	cur = waiter;
+	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+		if (cur->ww_ctx)
+			goto deadlock;
+	}
 
 	return 0;
+
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+	ctx->contending_lock = ww;
+#endif
+	return -EDEADLK;
 }
 
 static inline int __sched
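The scan added above uses list_for_each_entry_continue_reverse(), which starts at the entry before the cursor and walks toward the list head, i.e. over the waiters queued ahead of us. Below is a standalone userspace sketch of that walk; the stamps are hypothetical and the list macros are stripped-down stand-ins for <linux/list.h>, kept just faithful enough to show the iteration order.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the kernel macro: begin at pos->prev, stop at the head. */
#define list_for_each_entry_continue_reverse(pos, head, member)		\
	for (pos = container_of((pos)->member.prev, typeof(*pos), member);	\
	     &pos->member != (head);						\
	     pos = container_of((pos)->member.prev, typeof(*pos), member))

struct waiter { int stamp; int has_ctx; struct list_head list; };

int main(void)
{
	struct list_head wait_list = { &wait_list, &wait_list };
	struct waiter w[3] = {
		{ .stamp = 1, .has_ctx = 1 },	/* earliest stamp, queued first */
		{ .stamp = 5, .has_ctx = 0 },	/* waiter without a context     */
		{ .stamp = 9, .has_ctx = 1 },	/* "us", just added at the tail */
	};

	for (int i = 0; i < 3; i++) {		/* open-coded list_add_tail() */
		w[i].list.prev = wait_list.prev;
		w[i].list.next = &wait_list;
		wait_list.prev->next = &w[i].list;
		wait_list.prev = &w[i].list;
	}

	struct waiter *cur = &w[2];		/* cur = waiter; */
	list_for_each_entry_continue_reverse(cur, &wait_list, list) {
		if (cur->has_ctx) {		/* kernel code: goto deadlock */
			printf("waiter with stamp %d ahead of us has a context -> back off\n",
			       cur->stamp);
			return 0;
		}
	}
	printf("no contexted waiter ahead -> keep waiting\n");
	return 0;
}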
@@ -655,6 +666,15 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		}
 
 		pos = &cur->list;
+
+		/*
+		 * Wake up the waiter so that it gets a chance to back
+		 * off.
+		 */
+		if (cur->ww_ctx->acquired > 0) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
 	}
 
 	list_add_tail(&waiter->list, pos);
@@ -746,7 +766,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
+			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
 		}