forked from luck/tmp_suning_uos_patched
tcp: increment sk_drops for dropped rx packets
Now that ss can report sk_drops, we can instruct TCP to increment this per-socket counter when it drops an incoming frame, to refine monitoring and debugging. A following patch takes care of listener drops. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
15239302ed
commit
532182cd61
@ -2012,6 +2012,13 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
|
||||
SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
|
||||
}
|
||||
|
||||
static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
|
||||
{
|
||||
int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
|
||||
|
||||
atomic_add(segs, &sk->sk_drops);
|
||||
}
|
||||
|
||||
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
|
||||
struct sk_buff *skb);
|
||||
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
|
||||
|
@ -4307,6 +4307,12 @@ static bool tcp_try_coalesce(struct sock *sk,
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Free an incoming skb that TCP has decided to drop, first charging
 * the drop to the socket's sk_drops counter so per-socket drops are
 * visible to monitoring tools (e.g. ss).
 *
 * NOTE: sk_drops_add() reads skb_shinfo(skb), so it must run before
 * __kfree_skb() releases the skb — do not reorder these calls.
 */
static void tcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}
|
||||
|
||||
/* This one checks to see if we can put data from the
|
||||
* out_of_order queue into the receive_queue.
|
||||
*/
|
||||
@ -4331,7 +4337,7 @@ static void tcp_ofo_queue(struct sock *sk)
|
||||
__skb_unlink(skb, &tp->out_of_order_queue);
|
||||
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
|
||||
SOCK_DEBUG(sk, "ofo packet was already received\n");
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
continue;
|
||||
}
|
||||
SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
|
||||
@ -4383,7 +4389,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
|
||||
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -4447,7 +4453,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
|
||||
/* All the bits are present. Drop. */
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
skb = NULL;
|
||||
tcp_dsack_set(sk, seq, end_seq);
|
||||
goto add_sack;
|
||||
@ -4486,7 +4492,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
|
||||
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
|
||||
TCP_SKB_CB(skb1)->end_seq);
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
|
||||
__kfree_skb(skb1);
|
||||
tcp_drop(sk, skb1);
|
||||
}
|
||||
|
||||
add_sack:
|
||||
@ -4569,12 +4575,13 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
|
||||
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
int eaten = -1;
|
||||
bool fragstolen = false;
|
||||
int eaten = -1;
|
||||
|
||||
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
|
||||
goto drop;
|
||||
|
||||
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
|
||||
__kfree_skb(skb);
|
||||
return;
|
||||
}
|
||||
skb_dst_drop(skb);
|
||||
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
|
||||
|
||||
@ -4656,7 +4663,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
|
||||
tcp_enter_quickack_mode(sk);
|
||||
inet_csk_schedule_ack(sk);
|
||||
drop:
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -5233,7 +5240,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
|
||||
return true;
|
||||
|
||||
discard:
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -5451,7 +5458,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
|
||||
|
||||
discard:
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_rcv_established);
|
||||
|
||||
@ -5682,7 +5689,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
|
||||
TCP_DELACK_MAX, TCP_RTO_MAX);
|
||||
|
||||
discard:
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
return 0;
|
||||
} else {
|
||||
tcp_send_ack(sk);
|
||||
@ -6043,7 +6050,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
|
||||
|
||||
if (!queued) {
|
||||
discard:
|
||||
__kfree_skb(skb);
|
||||
tcp_drop(sk, skb);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -1694,6 +1694,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
|
||||
return 0;
|
||||
|
||||
discard_and_relse:
|
||||
sk_drops_add(sk, skb);
|
||||
if (refcounted)
|
||||
sock_put(sk);
|
||||
goto discard_it;
|
||||
|
@ -1484,6 +1484,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
|
||||
return 0;
|
||||
|
||||
discard_and_relse:
|
||||
sk_drops_add(sk, skb);
|
||||
if (refcounted)
|
||||
sock_put(sk);
|
||||
goto discard_it;
|
||||
|
Loading…
Reference in New Issue
Block a user