forked from luck/tmp_suning_uos_patched
tcp: remove tcp_queue argument from tso_fragment()
tso_fragment() is only called for packets still in the write queue. Remove the tcp_queue parameter to make this more obvious, even though the comment already states this clearly. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6aedbf986f
commit
564833419f
|
@@ -1846,17 +1846,17 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
|
||||||
* know that all the data is in scatter-gather pages, and that the
|
* know that all the data is in scatter-gather pages, and that the
|
||||||
* packet has never been sent out before (and thus is not cloned).
|
* packet has never been sent out before (and thus is not cloned).
|
||||||
*/
|
*/
|
||||||
static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
|
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
|
||||||
struct sk_buff *skb, unsigned int len,
|
|
||||||
unsigned int mss_now, gfp_t gfp)
|
unsigned int mss_now, gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct sk_buff *buff;
|
|
||||||
int nlen = skb->len - len;
|
int nlen = skb->len - len;
|
||||||
|
struct sk_buff *buff;
|
||||||
u8 flags;
|
u8 flags;
|
||||||
|
|
||||||
/* All of a TSO frame must be composed of paged data. */
|
/* All of a TSO frame must be composed of paged data. */
|
||||||
if (skb->len != skb->data_len)
|
if (skb->len != skb->data_len)
|
||||||
return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
|
return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
|
||||||
|
skb, len, mss_now, gfp);
|
||||||
|
|
||||||
buff = sk_stream_alloc_skb(sk, 0, gfp, true);
|
buff = sk_stream_alloc_skb(sk, 0, gfp, true);
|
||||||
if (unlikely(!buff))
|
if (unlikely(!buff))
|
||||||
|
@@ -1892,7 +1892,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
|
||||||
|
|
||||||
/* Link BUFF into the send queue. */
|
/* Link BUFF into the send queue. */
|
||||||
__skb_header_release(buff);
|
__skb_header_release(buff);
|
||||||
tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
|
tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -2391,8 +2391,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
|
||||||
nonagle);
|
nonagle);
|
||||||
|
|
||||||
if (skb->len > limit &&
|
if (skb->len > limit &&
|
||||||
unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
|
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
|
||||||
skb, limit, mss_now, gfp)))
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
if (tcp_small_queue_check(sk, skb, 0))
|
if (tcp_small_queue_check(sk, skb, 0))
|
||||||
|
|
Loading…
Reference in New Issue
Block a user