net: Generalise wq_has_sleeper helper
The memory barrier in the helper wq_has_sleeper is needed by just about
every user of waitqueue_active.  This patch generalises it by making it
take a wait_queue_head_t directly.  The existing helper is renamed to
skwq_has_sleeper.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c52fd05a2f
commit 1ce0bf50ae
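The pattern this change targets can be illustrated with a small, purely hypothetical waker/waiter pair (not part of this commit; my_wq, data_ready, producer() and consumer() are made-up names). The waker publishes its condition first and only then calls the now-generic wq_has_sleeper(), whose smp_mb() keeps the condition store ordered before the queue check so that a concurrent waiter cannot be missed:

/*
 * Illustrative sketch only -- not taken from this patch.  Assumes a tree
 * that already has the generalised wq_has_sleeper(wait_queue_head_t *);
 * my_wq, data_ready, producer() and consumer() are hypothetical names.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool data_ready;

static void producer(void)
{
	data_ready = true;		/* publish the condition first */

	/*
	 * wq_has_sleeper() executes smp_mb() before looking at the wait
	 * queue, pairing with the barrier implied by prepare_to_wait()
	 * on the waiting side, so a sleeper that still sees
	 * data_ready == false cannot be missed here.
	 */
	if (wq_has_sleeper(&my_wq))
		wake_up_interruptible(&my_wq);
}

static void consumer(void)
{
	/* wait_event_interruptible() re-checks data_ready after queueing */
	if (wait_event_interruptible(my_wq, data_ready))
		return;			/* interrupted by a signal */
}

The same pairing was previously only available to socket code via the socket_wq variant, which the hunks below rename to skwq_has_sleeper().
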
@@ -106,7 +106,7 @@ static void aead_wmem_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 							   POLLRDNORM |
 							   POLLRDBAND);
@@ -157,7 +157,7 @@ static void aead_data_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 							   POLLRDNORM |
 							   POLLRDBAND);

@@ -238,7 +238,7 @@ static void skcipher_wmem_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 							   POLLRDNORM |
 							   POLLRDBAND);
@@ -288,7 +288,7 @@ static void skcipher_data_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 							   POLLRDNORM |
 							   POLLRDBAND);

@@ -107,6 +107,27 @@ static inline int waitqueue_active(wait_queue_head_t *q)
 	return !list_empty(&q->task_list);
 }
 
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+{
+	/*
+	 * We need to be sure we are in sync with the
+	 * add_wait_queue modifications to the wait queue.
+	 *
+	 * This memory barrier should be paired with one on the
+	 * waiting side.
+	 */
+	smp_mb();
+	return waitqueue_active(wq);
+}
+
 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

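The comment in the new helper above notes that its smp_mb() "should be paired with one on the waiting side". For reference, an open-coded waiter pairing with it would look roughly like the sketch below (again hypothetical, reusing the made-up my_wq and data_ready from the earlier sketch); the set_current_state() done inside prepare_to_wait() provides the paired barrier before the condition is re-checked:

/*
 * Hypothetical open-coded waiting side, pairing with the smp_mb() in
 * wq_has_sleeper() above; not part of this patch.
 */
static void consumer_open_coded(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue ourselves and set the task state (implies a barrier) */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (data_ready || signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);
}

Convenience macros such as wait_event_interruptible() wrap this same loop.
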
@@ -58,6 +58,7 @@
 #include <linux/memcontrol.h>
 #include <linux/static_key.h>
 #include <linux/sched.h>
+#include <linux/wait.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -1879,12 +1880,12 @@ static inline bool sk_has_allocations(const struct sock *sk)
 }
 
 /**
- * wq_has_sleeper - check if there are any waiting processes
+ * skwq_has_sleeper - check if there are any waiting processes
  * @wq: struct socket_wq
  *
  * Returns true if socket_wq has waiting processes
  *
- * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
+ * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
  * barrier call. They were added due to the race found within the tcp code.
  *
  * Consider following tcp code paths:
@@ -1910,15 +1911,9 @@ static inline bool sk_has_allocations(const struct sock *sk)
  * data on the socket.
  *
  */
-static inline bool wq_has_sleeper(struct socket_wq *wq)
+static inline bool skwq_has_sleeper(struct socket_wq *wq)
 {
-	/* We need to be sure we are in sync with the
-	 * add_wait_queue modifications to the wait queue.
-	 *
-	 * This memory barrier is paired in the sock_poll_wait.
-	 */
-	smp_mb();
-	return wq && waitqueue_active(&wq->wait);
+	return wq && wq_has_sleeper(&wq->wait);
 }
 
 /**

@@ -96,7 +96,7 @@ static void vcc_def_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up(&wq->wait);
 	rcu_read_unlock();
 }
@@ -117,7 +117,7 @@ static void vcc_write_space(struct sock *sk)
 
 	if (vcc_writable(sk)) {
 		wq = rcu_dereference(sk->sk_wq);
-		if (wq_has_sleeper(wq))
+		if (skwq_has_sleeper(wq))
 			wake_up_interruptible(&wq->wait);
 
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

@@ -2283,7 +2283,7 @@ static void sock_def_wakeup(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_all(&wq->wait);
 	rcu_read_unlock();
 }
@@ -2294,7 +2294,7 @@ static void sock_def_error_report(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_poll(&wq->wait, POLLERR);
 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
 	rcu_read_unlock();
@@ -2306,7 +2306,7 @@ static void sock_def_readable(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@@ -2324,7 +2324,7 @@ static void sock_def_write_space(struct sock *sk)
 	 */
 	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
 		wq = rcu_dereference(sk->sk_wq);
-		if (wq_has_sleeper(wq))
+		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 

@@ -35,7 +35,7 @@ void sk_stream_write_space(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_poll(&wq->wait, POLLOUT |
 					   POLLWRNORM | POLLWRBAND);
 	if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))

@@ -201,7 +201,7 @@ void dccp_write_space(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible(&wq->wait);
 	/* Should agree with poll, otherwise some programs break */
 	if (sock_writeable(sk))

@@ -303,7 +303,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_all(&wq->wait);
 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	rcu_read_unlock();

@@ -67,7 +67,7 @@ static void rxrpc_write_space(struct sock *sk)
 	if (rxrpc_writable(sk)) {
 		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
 
-		if (wq_has_sleeper(wq))
+		if (skwq_has_sleeper(wq))
 			wake_up_interruptible(&wq->wait);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}

@@ -6978,7 +6978,7 @@ void sctp_data_ready(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);

@@ -1492,7 +1492,7 @@ static void tipc_write_space(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
 						POLLWRNORM | POLLWRBAND);
 	rcu_read_unlock();
@@ -1509,7 +1509,7 @@ static void tipc_data_ready(struct sock *sk)
 
 	rcu_read_lock();
 	wq = rcu_dereference(sk->sk_wq);
-	if (wq_has_sleeper(wq))
+	if (skwq_has_sleeper(wq))
 		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
 						POLLRDNORM | POLLRDBAND);
 	rcu_read_unlock();

@@ -339,7 +339,7 @@ static void unix_write_space(struct sock *sk)
 	rcu_read_lock();
 	if (unix_writable(sk)) {
 		wq = rcu_dereference(sk->sk_wq);
-		if (wq_has_sleeper(wq))
+		if (skwq_has_sleeper(wq))
 			wake_up_interruptible_sync_poll(&wq->wait,
 				POLLOUT | POLLWRNORM | POLLWRBAND);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);