forked from luck/tmp_suning_uos_patched
tcp: do not block bh during prequeue processing
AFAIK, nothing in current TCP stack absolutely wants BH being disabled once socket is owned by a thread running in process context. As mentioned in my prior patch ("tcp: give prequeue mode some care"), processing a batch of packets might take time, better not block BH at all. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
c10d9310ed
commit
fb3477c0f4
|
@ -1449,12 +1449,8 @@ static void tcp_prequeue_process(struct sock *sk)
|
|||
|
||||
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
|
||||
|
||||
/* RX process wants to run with disabled BHs, though it is not
|
||||
* necessary */
|
||||
local_bh_disable();
|
||||
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
|
||||
sk_backlog_rcv(sk, skb);
|
||||
local_bh_enable();
|
||||
|
||||
/* Clear memory counter. */
|
||||
tp->ucopy.memory = 0;
|
||||
|
|
|
@ -4611,14 +4611,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
|
|||
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
local_bh_enable();
|
||||
if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) {
|
||||
tp->ucopy.len -= chunk;
|
||||
tp->copied_seq += chunk;
|
||||
eaten = (chunk == skb->len);
|
||||
tcp_rcv_space_adjust(sk);
|
||||
}
|
||||
local_bh_disable();
|
||||
}
|
||||
|
||||
if (eaten <= 0) {
|
||||
|
@ -5134,7 +5132,6 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
|
|||
int chunk = skb->len - hlen;
|
||||
int err;
|
||||
|
||||
local_bh_enable();
|
||||
if (skb_csum_unnecessary(skb))
|
||||
err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk);
|
||||
else
|
||||
|
@ -5146,32 +5143,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
|
|||
tcp_rcv_space_adjust(sk);
|
||||
}
|
||||
|
||||
local_bh_disable();
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Complete a deferred checksum verification on @skb.
 *
 * If the socket is currently owned by a user-context thread, BHs were
 * disabled by the caller; re-enable them around the (potentially costly)
 * checksum computation and restore the disabled state before returning.
 * Returns the folded checksum result (0 means the checksum is valid).
 */
static __sum16 __tcp_checksum_complete_user(struct sock *sk,
					    struct sk_buff *skb)
{
	__sum16 result;

	if (!sock_owned_by_user(sk))
		return __tcp_checksum_complete(skb);

	/* Owned by process context: don't keep BHs blocked while csumming. */
	local_bh_enable();
	result = __tcp_checksum_complete(skb);
	local_bh_disable();

	return result;
}
|
||||
|
||||
static inline bool tcp_checksum_complete_user(struct sock *sk,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
return !skb_csum_unnecessary(skb) &&
|
||||
__tcp_checksum_complete_user(sk, skb);
|
||||
}
|
||||
|
||||
/* Does PAWS and seqno based validation of an incoming segment, flags will
|
||||
* play significant role here.
|
||||
*/
|
||||
|
@ -5386,7 +5360,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
|||
}
|
||||
}
|
||||
if (!eaten) {
|
||||
if (tcp_checksum_complete_user(sk, skb))
|
||||
if (tcp_checksum_complete(skb))
|
||||
goto csum_error;
|
||||
|
||||
if ((int)skb->truesize > sk->sk_forward_alloc)
|
||||
|
@ -5430,7 +5404,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
|||
}
|
||||
|
||||
slow_path:
|
||||
if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
|
||||
if (len < (th->doff << 2) || tcp_checksum_complete(skb))
|
||||
goto csum_error;
|
||||
|
||||
if (!th->ack && !th->rst && !th->syn)
|
||||
|
|
Loading…
Reference in New Issue
Block a user