// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
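
/* Illustrative example (hypothetical numbers, not part of the original
 * source): if tcp_min_rtt() is 40000 us, srtt_us holds 480000 (srtt is
 * stored left-shifted by 3, i.e. a smoothed RTT of 60 ms), and
 * reo_wnd_steps is 2, the reordering window above evaluates to
 * min((40000 >> 2) * 2, 480000 >> 3) = min(20000, 60000) = 20000 us.
 */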

/* Returns the time remaining, in microseconds, before @skb's RACK deadline
 * (its send time plus the RACK RTT plus the reordering window) expires;
 * a result <= 0 means the skb should be marked lost.
 */
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
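
/* Worked example (hypothetical numbers): with rack.rtt_us = 50000 and
 * reo_wnd = 10000, an skb (re)sent 55000 us before tp->tcp_mstamp yields
 * 50000 + 10000 - 55000 = 5000, i.e. it gets another 5 ms to be (s)acked;
 * once 60000 us have elapsed the result reaches 0 and the skb is deemed
 * lost.
 */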

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
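
/* Illustrative walk-through (hypothetical numbers, not from the original
 * source): P1 and P2 are sent at t = 0 and t = 10 ms; P2 is SACKed, so
 * rack.mstamp/end_seq now refer to P2 and rack.rtt_us = 50 ms; reo_wnd is
 * 10 ms. A scan at t = 70 ms does not break on P1 (it was sent before P2),
 * and P1's remaining time is 50 + 10 - 70 = -10 ms, so P1 is marked lost.
 * Had the scan run at t = 55 ms instead, remaining would be +5 ms and
 * *reo_timeout would be set so the caller can arm a timer to re-check.
 */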

bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}
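
/* Illustrative example (hypothetical numbers): if tcp_rack_detect_loss()
 * reports a 5000 us wait and HZ = 1000, the timer above is armed for
 * usecs_to_jiffies(5000) = 5 jiffies plus the small TCP_TIMEOUT_MIN floor,
 * so the reordering timeout fires shortly after the earliest
 * not-yet-expired skb's RACK deadline.
 */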

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
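
/* Illustrative example (hypothetical numbers): if a retransmission's
 * (s)ack arrives only 8 ms after it was sent while tcp_min_rtt() is 25 ms,
 * the (s)ack almost certainly refers to the original transmission, so the
 * early return above skips the ambiguous sample instead of advancing RACK
 * state with a bogus RTT.
 */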

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
 *
 * If a DSACK is received that seems like it may have been due to reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (accounts for full DSACK-based loss recovery
 * undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the new DSACK we
 * are reacting to is (approximately) due to a spurious retransmission
 * sent after the last reo_wnd update.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than as an
 * absolute value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
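
/* Illustrative sequence (hypothetical, not from the original source):
 * starting from reo_wnd_steps = 1, a DSACK that passes the once-per-RTT
 * check above bumps reo_wnd_steps to 2 and sets reo_wnd_persist to
 * TCP_RACK_RECOVERY_THRESH (16). If the following recoveries complete
 * without another qualifying DSACK and the persist counter (decremented
 * elsewhere as recoveries end) reaches zero, reo_wnd_steps falls back to 1.
 */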

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
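
/* Illustrative example (hypothetical numbers): with tp->reordering at its
 * default of 3 and no SACK support, the third DUPACK pushes sacked_out to
 * the threshold before entering TCP_CA_Recovery, so the head of the
 * retransmit queue is marked lost. If that head skb spans several MSS, it
 * is first fragmented so that only one MSS worth of data is marked and
 * retransmitted, matching NewReno's one-segment-at-a-time recovery.
 */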