tcp: PRR uses CRB mode by default and SS mode conditionally
PRR slow start is often too aggressive, especially when drops are caused by traffic policers. The policers mainly use a token bucket to enforce the rate, so sending (twice) faster than the delivery rate causes excessive drops.

This patch changes PRR to the conservative reduction bound (CRB) mode in RFC 6937 by default. CRB follows the packet conservation rule to send at most the delivery rate. But if many packets are lost and the pipe is empty, CRB may take N round trips to repair N losses. We conditionally turn on slow start mode to speed up the recovery if all of these conditions are met:

1) on the second round or later in recovery
2) retransmission sent in the previous round is delivered on this ACK
3) no retransmission is marked lost on this ACK

By using packet conservation by default, this change reduces loss retransmits significantly on networks that deploy traffic policers, up to a 20% reduction of the overall loss rate.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 291a00d1a7
commit 3759824da8
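For orientation before the diff: the heart of the change is how many packets tcp_cwnd_reduction() lets the sender transmit per ACK during recovery. The sketch below restates that decision logic as a standalone userspace function. It is illustrative only: struct prr_state, prr_sndcnt(), and the two bool parameters are hypothetical stand-ins for the tcp_sock fields and the FLAG_RETRANS_DATA_ACKED / FLAG_LOST_RETRANS bits the kernel code actually uses.

#include <stdbool.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for the relevant tcp_sock fields. */
struct prr_state {
	int prior_cwnd;    /* cwnd when recovery began */
	int ssthresh;      /* slow start threshold */
	int prr_delivered; /* packets delivered since recovery began */
	int prr_out;       /* packets sent since recovery began */
};

/* How many packets may go out on this ACK (sketch of the patched logic)? */
static int prr_sndcnt(struct prr_state *s, int in_flight,
		      int newly_acked_sacked,
		      bool retrans_delivered, /* ~FLAG_RETRANS_DATA_ACKED */
		      bool retrans_lost)      /* ~FLAG_LOST_RETRANS */
{
	int delta = s->ssthresh - in_flight;
	int sndcnt;

	s->prr_delivered += newly_acked_sacked;
	if (delta < 0) {
		/* Pipe still above ssthresh: proportional rate reduction,
		 * spreading the cwnd cut across one full RTT of deliveries. */
		long long dividend = (long long)s->ssthresh * s->prr_delivered
				     + s->prior_cwnd - 1;
		sndcnt = (int)(dividend / s->prior_cwnd) - s->prr_out;
	} else if (retrans_delivered && !retrans_lost) {
		/* Retransmits from an earlier round were delivered and none
		 * were marked lost: slow start toward ssthresh, bounded by
		 * delta so in-flight never exceeds ssthresh. */
		sndcnt = MIN(delta, MAX(s->prr_delivered - s->prr_out,
					newly_acked_sacked) + 1);
	} else {
		/* Default: conservative reduction bound (CRB) -- packet
		 * conservation, send at most what this ACK delivered. */
		sndcnt = MIN(delta, newly_acked_sacked);
	}
	/* The kernel additionally forces sndcnt >= 1 when a fast retransmit
	 * is due (its fast_rexmit argument); clamp at 0 here for brevity. */
	return MAX(sndcnt, 0);
}

Note how condition 1) from the commit message ("second round or later") needs no explicit test: a retransmission can only be delivered after at least one round of recovery has already passed.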
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2476,15 +2476,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 	return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *	cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *	losses and/or application stalls), do not perform any further cwnd
- *	reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2501,7 +2500,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-			       int fast_rexmit)
+			       int fast_rexmit, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int sndcnt = 0;
@@ -2510,16 +2509,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
 		       (tp->packets_out - tp->sacked_out);
 
 	tp->prr_delivered += newly_acked_sacked;
-	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+	if (delta < 0) {
 		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 			       tp->prior_cwnd - 1;
 		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else {
+	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+		   !(flag & FLAG_LOST_RETRANS)) {
 		sndcnt = min_t(int, delta,
 			       max_t(int, tp->prr_delivered - tp->prr_out,
 				     newly_acked_sacked) + 1);
+	} else {
+		sndcnt = min(delta, newly_acked_sacked);
 	}
 
 	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
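Worked numbers for the proportional branch (assumed example values): with prior_cwnd = 20 and snd_ssthresh = 10, an ACK that brings prr_delivered to 4 gives dividend = 10 * 4 + 20 - 1 = 59, and 59 / 20 = 2 in integer arithmetic; with prr_out = 1 already sent, sndcnt = 1. In other words, roughly one packet goes out per two delivered, walking cwnd from 20 down toward 10 over one round trip. Once in-flight reaches ssthresh, delta turns non-negative and the two new branches take over: slow start bounded by delta when retransmits are acked cleanly, otherwise the CRB bound min(delta, newly_acked_sacked).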
@@ -2580,7 +2581,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
 	} else {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 	}
 }
 
@@ -2737,7 +2738,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-				 const int prior_unsacked)
+				 const int prior_unsacked, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2753,7 +2754,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
 	 * mark more packets lost or retransmit more.
 	 */
 	if (tp->retrans_out) {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 		return true;
 	}
 
@@ -2840,7 +2841,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 		if (tcp_is_reno(tp) && is_dupack)
 			tcp_add_reno_sack(sk);
 	} else {
-		if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+		if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
 			return;
 		/* Partial ACK arrived. Force fast retransmit. */
 		do_lost = tcp_is_reno(tp) ||
@@ -2891,7 +2892,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
 	if (do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
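The hunks after tcp_cwnd_reduction() itself are plumbing: they thread the ACK's flag word from tcp_try_to_open(), tcp_try_undo_partial(), and tcp_fastretrans_alert() into tcp_cwnd_reduction(), so the sndcnt logic above can test FLAG_RETRANS_DATA_ACKED and FLAG_LOST_RETRANS.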