forked from luck/tmp_suning_uos_patched
x86/kvm: Alloc dummy async #PF token outside of raw spinlock
commit 0547758a6de3cc71a0cfdd031a3621a30db6a68b upstream. Drop the raw spinlock in kvm_async_pf_task_wake() before allocating the dummy async #PF token; the allocator is preemptible on PREEMPT_RT kernels and must not be called from truly atomic contexts. Opportunistically document why it's ok to loop on allocation failure, i.e. why the function won't get stuck in an infinite loop. Reported-by: Yajun Deng <yajun.deng@linux.dev> Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
4c4a11c74a
commit
4a9f3a9c28
|
@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
|
|||
{
|
||||
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
|
||||
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
|
||||
struct kvm_task_sleep_node *n;
|
||||
struct kvm_task_sleep_node *n, *dummy = NULL;
|
||||
|
||||
if (token == ~0) {
|
||||
apf_task_wake_all();
|
||||
|
@ -200,28 +200,41 @@ void kvm_async_pf_task_wake(u32 token)
|
|||
n = _find_apf_task(b, token);
|
||||
if (!n) {
|
||||
/*
|
||||
* async PF was not yet handled.
|
||||
* Add dummy entry for the token.
|
||||
* Async #PF not yet handled, add a dummy entry for the token.
|
||||
* Allocating the token must be done outside of the raw lock
|
||||
* as the allocator is preemptible on PREEMPT_RT kernels.
|
||||
*/
|
||||
n = kzalloc(sizeof(*n), GFP_ATOMIC);
|
||||
if (!n) {
|
||||
/*
|
||||
* Allocation failed! Busy wait while other cpu
|
||||
* handles async PF.
|
||||
*/
|
||||
if (!dummy) {
|
||||
raw_spin_unlock(&b->lock);
|
||||
cpu_relax();
|
||||
dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
|
||||
|
||||
/*
|
||||
* Continue looping on allocation failure, eventually
|
||||
* the async #PF will be handled and allocating a new
|
||||
* node will be unnecessary.
|
||||
*/
|
||||
if (!dummy)
|
||||
cpu_relax();
|
||||
|
||||
/*
|
||||
* Recheck for async #PF completion before enqueueing
|
||||
* the dummy token to avoid duplicate list entries.
|
||||
*/
|
||||
goto again;
|
||||
}
|
||||
n->token = token;
|
||||
n->cpu = smp_processor_id();
|
||||
init_swait_queue_head(&n->wq);
|
||||
hlist_add_head(&n->link, &b->list);
|
||||
dummy->token = token;
|
||||
dummy->cpu = smp_processor_id();
|
||||
init_swait_queue_head(&dummy->wq);
|
||||
hlist_add_head(&dummy->link, &b->list);
|
||||
dummy = NULL;
|
||||
} else {
|
||||
apf_task_wake_one(n);
|
||||
}
|
||||
raw_spin_unlock(&b->lock);
|
||||
return;
|
||||
|
||||
/* A dummy token might be allocated and ultimately not used. */
|
||||
if (dummy)
|
||||
kfree(dummy);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user