/* Bottleneck Bandwidth and RTT (BBR) congestion control
 *
 * BBR congestion control computes the sending rate based on the delivery
 * rate (throughput) estimated from ACKs. In a nutshell:
 *
 * On each ACK, update our model of the network path:
 *	bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
 *	min_rtt = windowed_min(rtt, 10 seconds)
 * pacing_rate = pacing_gain * bottleneck_bandwidth
 * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
 *
 * The core algorithm does not react directly to packet losses or delays,
 * although BBR may adjust the size of the next send per ACK when loss is
 * observed, or adjust the sending rate if it estimates there is a
 * traffic policer, in order to keep the drop rate reasonable.
 *
 * Here is a state transition diagram for BBR:
 *
 *             |
 *             V
 *    +---> STARTUP  ----+
 *    |        |         |
 *    |        V         |
 *    |      DRAIN   ----+
 *    |        |         |
 *    |        V         |
 *    +---> PROBE_BW ----+
 *    |      ^    |      |
 *    |      |    |      |
 *    |      +----+      |
 *    |                  |
 *    +---- PROBE_RTT <--+
 *
 * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
 * When it estimates the pipe is full, it enters DRAIN to drain the queue.
 * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
 * A long-lived BBR flow spends the vast majority of its time remaining
 * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
 * in a fair manner, with a small, bounded queue. *If* a flow has been
 * continuously sending for the entire min_rtt window, and hasn't seen an RTT
 * sample that matches or decreases its min_rtt estimate for 10 seconds, then
 * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
 * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
 * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
 * otherwise we enter STARTUP to try to fill the pipe.
 *
 * BBR is described in detail in:
 *   "BBR: Congestion-Based Congestion Control",
 *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
 *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
 *
 * There is a public e-mail list for discussing BBR development and testing:
 *   https://groups.google.com/forum/#!forum/bbr-dev
 *
 * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled;
 * otherwise the TCP stack falls back to internal pacing, using one
 * high-resolution timer per TCP socket, which may use more resources.
 */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include <linux/inet.h>
#include <linux/random.h>
#include <linux/win_minmax.h>

/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
 * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
 * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
 * Since the minimum window is >=4 packets, the lower bound isn't
 * an issue. The upper bound isn't an issue with existing technologies.
 */
#define BW_SCALE 24
#define BW_UNIT (1 << BW_SCALE)

#define BBR_SCALE 8	/* scaling factor for fractions in BBR (e.g. gains) */
#define BBR_UNIT (1 << BBR_SCALE)
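
/* Worked example of these fixed-point units (illustrative only, with
 * arbitrary round numbers): a path delivering 100 Mbit/s of 1500-byte
 * packets moves 100e6 / (1500 * 8) ~= 8333 pkts/sec ~= 0.008333 pkts/uSec,
 * stored as 0.008333 * BW_UNIT ~= 139810. Gains use BBR_UNIT the same way:
 * a pacing_gain of 1.25 is stored as BBR_UNIT * 5 / 4 = 320.
 */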

/* BBR has the following modes for deciding how fast to send: */
enum bbr_mode {
        BBR_STARTUP,	/* ramp up sending rate rapidly to fill pipe */
        BBR_DRAIN,	/* drain any queue created during startup */
        BBR_PROBE_BW,	/* discover, share bw: pace around estimated bw */
        BBR_PROBE_RTT,	/* cut inflight to min to probe min_rtt */
};

/* BBR congestion control block */
struct bbr {
        u32     min_rtt_us;	        /* min RTT in min_rtt_win_sec window */
        u32     min_rtt_stamp;	        /* timestamp of min_rtt_us */
        u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
        struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
        u32     rtt_cnt;	    /* count of packet-timed rounds elapsed */
        u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
        u64     cycle_mstamp;	     /* time of this cycle phase start */
        u32     mode:3,		     /* current bbr_mode in state machine */
                prev_ca_state:3,     /* CA state on previous ACK */
                packet_conservation:1,  /* use packet conservation? */
                round_start:1,	     /* start of packet-timed tx->ack round? */
                idle_restart:1,	     /* restarting after idle? */
                probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
                unused:13,
                lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
                lt_rtt_cnt:7,	     /* round trips in long-term interval */
                lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
        u32     lt_bw;		     /* LT est delivery rate in pkts/uS << 24 */
        u32     lt_last_delivered;   /* LT intvl start: tp->delivered */
        u32     lt_last_stamp;	     /* LT intvl start: tp->delivered_mstamp */
        u32     lt_last_lost;	     /* LT intvl start: tp->lost */
        u32     pacing_gain:10,	/* current gain for setting pacing rate */
                cwnd_gain:10,	/* current gain for setting cwnd */
                full_bw_reached:1,   /* reached full bw in Startup? */
                full_bw_cnt:2,	/* number of rounds without large bw gains */
                cycle_idx:3,	/* current index in pacing_gain cycle array */
                has_seen_rtt:1, /* have we seen an RTT sample yet? */
                unused_b:5;
        u32     prior_cwnd;	/* prior cwnd upon entering loss recovery */
        u32     full_bw;	/* recent bw, to estimate if pipe is full */

        /* For tracking ACK aggregation: */
        u64     ack_epoch_mstamp;	/* start of ACK sampling epoch */
        u16     extra_acked[2];		/* max excess data ACKed in epoch */
        u32     ack_epoch_acked:20,	/* packets (S)ACKed in sampling epoch */
                extra_acked_win_rtts:5,	/* age of extra_acked, in round trips */
                extra_acked_win_idx:1,	/* current index in extra_acked array */
                unused_c:6;
};

#define CYCLE_LEN	8	/* number of phases in a pacing gain cycle */

/* Window length of bw filter (in rounds): */
static const int bbr_bw_rtts = CYCLE_LEN + 2;
/* Window length of min_rtt filter (in sec): */
static const u32 bbr_min_rtt_win_sec = 10;
/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;

/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
 * In order to help drive the network toward lower queues and low latency while
 * maintaining high utilization, the average pacing rate aims to be slightly
 * lower than the estimated bandwidth. This is an important aspect of the
 * design.
 */
static const int bbr_pacing_margin_percent = 1;

/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
 * that will allow a smoothly increasing pacing rate that will double each RTT
 * and send the same number of packets per RTT that an un-paced, slow-starting
 * Reno or CUBIC flow would:
 */
static const int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
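
/* Numerically (illustrative sanity check): 2/ln(2) ~= 2.88539, so the scaled
 * gain is BBR_UNIT * 2885 / 1000 = 256 * 2885 / 1000 = 738 after integer
 * truncation; the "+ 1" rounds up to 739 > 256 * 2.88539 ~= 738.66, so the
 * effective gain never falls below 2/ln(2) due to fixed-point truncation.
 */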

/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
 * the queue created in BBR_STARTUP in a single round:
 */
static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
static const int bbr_cwnd_gain  = BBR_UNIT * 2;
/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
static const int bbr_pacing_gain[] = {
        BBR_UNIT * 5 / 4,	/* probe for more available bw */
        BBR_UNIT * 3 / 4,	/* drain queue and/or yield bw to other flows */
        BBR_UNIT, BBR_UNIT, BBR_UNIT,	/* cruise at 1.0*bw to utilize pipe, */
        BBR_UNIT, BBR_UNIT, BBR_UNIT	/* without creating excess queue... */
};
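
/* Illustrative check that the cycle is bw-neutral: over the CYCLE_LEN = 8
 * phases the average gain is (5/4 + 3/4 + 6 * 1) / 8 = 8/8 = 1.0, so a flow
 * that finds no extra bw neither inflates the queue nor starves the pipe
 * across a full gain cycle.
 */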

/* Randomize the starting gain cycling phase over N phases: */
static const u32 bbr_cycle_rand = 7;

/* Try to keep at least this many packets in flight, if things go smoothly. For
 * smooth functioning, a sliding window protocol ACKing every other packet
 * needs at least 4 packets in flight:
 */
static const u32 bbr_cwnd_min_target = 4;

/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
/* If bw has increased significantly (1.25x), there may be more bw available: */
static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
static const u32 bbr_full_bw_cnt = 3;

/* "long-term" ("LT") bandwidth estimator parameters... */
/* The minimum number of rounds in an LT bw sampling interval: */
static const u32 bbr_lt_intvl_min_rtts = 4;
/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
static const u32 bbr_lt_loss_thresh = 50;
/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
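
/* Illustrative reading of the scaled LT thresholds above: bbr_lt_loss_thresh
 * of 50 means a lost/delivered ratio of 50/BBR_UNIT = 50/256 ~= 19.5% (~20%);
 * bbr_lt_bw_ratio of BBR_UNIT/8 means two interval bw samples within 12.5%
 * of each other; bbr_lt_bw_diff of 4000/8 = 500 is 4 Kbit/sec expressed in
 * bytes/sec, the unit bbr_rate_bytes_per_sec() returns.
 */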

/* Gain factor for adding extra_acked to target cwnd: */
static const int bbr_extra_acked_gain = BBR_UNIT;
/* Window length of extra_acked window. */
static const u32 bbr_extra_acked_win_rtts = 5;
/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
/* Time period for clamping cwnd increment due to ack aggregation */
static const u32 bbr_extra_acked_max_us = 100 * 1000;

static void bbr_check_probe_rtt_done(struct sock *sk);

/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
        const struct bbr *bbr = inet_csk_ca(sk);

        return bbr->full_bw_reached;
}

/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
static u32 bbr_max_bw(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return minmax_get(&bbr->bw);
}

/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
static u32 bbr_bw(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
}

/* Return maximum extra acked in past k-2k round trips,
 * where k = bbr_extra_acked_win_rtts.
 */
static u16 bbr_extra_acked(const struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        return max(bbr->extra_acked[0], bbr->extra_acked[1]);
}

/* Return rate in bytes per second, optionally with a gain.
 * The order here is chosen carefully to avoid overflow of u64. This should
 * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
 */
static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
{
        unsigned int mss = tcp_sk(sk)->mss_cache;

        rate *= mss;
        rate *= gain;
        rate >>= BBR_SCALE;
        rate *= USEC_PER_SEC / 100 * (100 - bbr_pacing_margin_percent);
        return rate >> BW_SCALE;
}
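
/* Worked worst case for the ordering above (illustrative, approximate): at
 * 2.9 Tbit/sec with 1500-byte packets, rate ~= 242 pkts/uS << 24 ~= 4.1e9.
 * After "rate *= mss" (1500) and "rate *= gain" (739 for 2.89x) the value is
 * ~4.5e15; ">>= BBR_SCALE" gives ~1.8e13, and the final multiply by ~990000
 * yields ~1.74e19, just below the u64 limit of ~1.84e19. Multiplying by
 * USEC_PER_SEC before the BBR_SCALE shift would overflow at this rate.
 */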

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
        u64 rate = bw;

        rate = bbr_rate_bytes_per_sec(sk, rate, gain);
        rate = min_t(u64, rate, sk->sk_max_pacing_rate);
        return rate;
}

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 bw;
        u32 rtt_us;

        if (tp->srtt_us) {		/* any RTT sample yet? */
                rtt_us = max(tp->srtt_us >> 3, 1U);
                bbr->has_seen_rtt = 1;
        } else {			/* no RTT sample yet */
                rtt_us = USEC_PER_MSEC;	/* use nominal default RTT */
        }
        bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
        do_div(bw, rtt_us);
        sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}

/* Pace using current bw estimate and a gain factor. */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);

        if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
                bbr_init_pacing_rate_from_rtt(sk);
        if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
                sk->sk_pacing_rate = rate;
}

/* override sysctl_tcp_min_tso_segs */
static u32 bbr_min_tso_segs(struct sock *sk)
{
        return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
}

static u32 bbr_tso_segs_goal(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 segs, bytes;

        /* Sort of tcp_tso_autosize() but ignoring
         * driver provided sk_gso_max_size.
         */
        bytes = min_t(unsigned long,
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
                      GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
        segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));

        return min(segs, 0x7FU);
}
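
/* Illustrative sizing (assuming the stack's default sk_pacing_shift of 10,
 * i.e. roughly 1ms of data per burst): at a pacing rate of 12.5 MByte/sec
 * (100 Mbit/sec) this allows 12500000 >> 10 ~= 12207 bytes, or ~8 segments
 * at an mss_cache of 1448; below bbr_min_tso_rate the goal floors at
 * bbr_min_tso_segs(), i.e. 1 or 2 segments.
 */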

/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
static void bbr_save_cwnd(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
                bbr->prior_cwnd = tcp_snd_cwnd(tp);  /* this cwnd is good enough */
        else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
                bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
}

static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        if (event == CA_EVENT_TX_START && tp->app_limited) {
                bbr->idle_restart = 1;
                bbr->ack_epoch_mstamp = tp->tcp_mstamp;
                bbr->ack_epoch_acked = 0;
                /* Avoid pointless buffer overflows: pace at est. bw if we don't
                 * need more speed (we're restarting from idle and app-limited).
                 */
                if (bbr->mode == BBR_PROBE_BW)
                        bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
                else if (bbr->mode == BBR_PROBE_RTT)
                        bbr_check_probe_rtt_done(sk);
        }
}

/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
 *
 * bdp = ceil(bw * min_rtt * gain)
 *
 * The key factor, gain, controls the amount of queue. While a small gain
 * builds a smaller queue, the flow becomes more vulnerable to noise in RTT
 * measurements (e.g., delayed ACKs or other ACK compression effects). This
 * noise may cause BBR to under-estimate the rate.
 */
static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 bdp;
        u64 w;

        /* If we've never had a valid RTT sample, cap cwnd at the initial
         * default. This should only happen when the connection is not using TCP
         * timestamps and has retransmitted all of the SYN/SYNACK/data packets
         * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
         * case we need to slow-start up toward something safe: TCP_INIT_CWND.
         */
        if (unlikely(bbr->min_rtt_us == ~0U))	 /* no valid RTT samples yet? */
                return TCP_INIT_CWND;  /* be safe: cap at default initial cwnd*/

        w = (u64)bw * bbr->min_rtt_us;

        /* Apply a gain to the given value, remove the BW_SCALE shift, and
         * round the value up to avoid a negative feedback loop.
         */
        bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

        return bdp;
}
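
/* Worked BDP example (illustrative, arbitrary round numbers): a bw of
 * 0.008333 pkts/uS << 24 ~= 139810 (~100 Mbit/sec of 1500-byte packets) and
 * min_rtt_us = 40000 (40ms) give w ~= 139810 * 40000 ~= 5.6e9; with
 * gain = BBR_UNIT the bdp is ceil(5.6e9 / 2^24) = 334 packets, matching the
 * unscaled arithmetic 8333 pkts/sec * 0.04 sec ~= 333.
 */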

/* To achieve full performance in high-speed paths, we budget enough cwnd to
 * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
 *   - one skb in sending host Qdisc,
 *   - one skb in sending host TSO/GSO engine
 *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
 * which allows 2 outstanding 2-packet sequences, to try to keep pipe
 * full even with ACK-every-other-packet delayed ACKs.
 */
static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
{
        struct bbr *bbr = inet_csk_ca(sk);

        /* Allow enough full-sized skbs in flight to utilize end systems. */
        cwnd += 3 * bbr_tso_segs_goal(sk);

        /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
        cwnd = (cwnd + 1) & ~1U;

        /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
        if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
                cwnd += 2;

        return cwnd;
}

/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
{
        u32 inflight;

        inflight = bbr_bdp(sk, bw, gain);
        inflight = bbr_quantization_budget(sk, inflight);

        return inflight;
}

/* With pacing at lower layers, there's often less data "in the network" than
 * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
 * we often have several skbs queued in the pacing layer with a pre-scheduled
 * earliest departure time (EDT). BBR adapts its pacing rate based on the
 * inflight level that it estimates has already been "baked in" by previous
 * departure time decisions. We calculate a rough estimate of the number of our
 * packets that might be in the network at the earliest departure time for the
 * next skb scheduled:
 *
 *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
 *
 * If we're increasing inflight, then we want to know if the transmit of the
 * EDT skb will push inflight above the target, so inflight_at_edt includes
 * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
 * then estimate if inflight will sink too low just before the EDT transmit.
 */
static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 now_ns, edt_ns, interval_us;
        u32 interval_delivered, inflight_at_edt;

        now_ns = tp->tcp_clock_cache;
        edt_ns = max(tp->tcp_wstamp_ns, now_ns);
        interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
        interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
        inflight_at_edt = inflight_now;
        if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
                inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
        if (interval_delivered >= inflight_at_edt)
                return 0;
        return inflight_at_edt - interval_delivered;
}
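
/* Illustrative EDT example (arbitrary numbers): if the next skb's
 * pre-scheduled departure is 2000 usec in the future and bw is
 * 0.05 pkts/uSec << 24, then interval_delivered ~= 100 packets will have
 * left the network by then, so an inflight_now of 150 counts as only ~50
 * packets "in the network" at the earliest departure time.
 */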

/* Find the cwnd increment based on estimate of ack aggregation */
static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
{
        u32 max_aggr_cwnd, aggr_cwnd = 0;

        if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
                max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
                                / BW_UNIT;
                aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
                             >> BBR_SCALE;
                aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
        }

        return aggr_cwnd;
}

/* An optimization in BBR to reduce losses: On the first round of recovery, we
 * follow the packet conservation principle: send P packets per P packets acked.
 * After that, we slow-start and send at most 2*P packets per P packets acked.
 * After recovery finishes, or upon undo, we restore the cwnd we had when
 * recovery started (capped by the target cwnd based on estimated BDP).
 *
 * TODO(ycheng/ncardwell): implement a rate-based approach.
 */
static bool bbr_set_cwnd_to_recover_or_restore(
        struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
        u32 cwnd = tcp_snd_cwnd(tp);

        /* An ACK for P pkts should release at most 2*P packets. We do this
         * in two steps. First, here we deduct the number of lost packets.
         * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
         */
        if (rs->losses > 0)
                cwnd = max_t(s32, cwnd - rs->losses, 1);

        if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
                /* Starting 1st round of Recovery, so do packet conservation. */
                bbr->packet_conservation = 1;
                bbr->next_rtt_delivered = tp->delivered;  /* start round now */
                /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
                cwnd = tcp_packets_in_flight(tp) + acked;
        } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
                /* Exiting loss recovery; restore cwnd saved before recovery. */
                cwnd = max(cwnd, bbr->prior_cwnd);
                bbr->packet_conservation = 0;
        }
        bbr->prev_ca_state = state;

        if (bbr->packet_conservation) {
                *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
                return true;	/* yes, using packet conservation */
        }
        *new_cwnd = cwnd;
        return false;
}

/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
 * has drawn us down below target), or snap down to target if we're above it.
 */
static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
                         u32 acked, u32 bw, int gain)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;

        if (!acked)
                goto done;  /* no packet fully ACKed; just apply caps */

        if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
                goto done;

        target_cwnd = bbr_bdp(sk, bw, gain);

        /* Increment the cwnd to account for excess ACKed data that seems
         * due to aggregation (of data and/or ACKs) visible in the ACK stream.
         */
        target_cwnd += bbr_ack_aggregation_cwnd(sk);
        target_cwnd = bbr_quantization_budget(sk, target_cwnd);

        /* If we're below target cwnd, slow start cwnd toward target cwnd. */
        if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
                cwnd = min(cwnd + acked, target_cwnd);
        else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
                cwnd = cwnd + acked;
        cwnd = max(cwnd, bbr_cwnd_min_target);

done:
        tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));  /* global cap */
        if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
                tcp_snd_cwnd_set(tp,
                                 min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
}

/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
static bool bbr_is_next_cycle_phase(struct sock *sk,
                                    const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        bool is_full_length =
                tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
                bbr->min_rtt_us;
        u32 inflight, bw;

        /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
         * use the pipe without increasing the queue.
         */
        if (bbr->pacing_gain == BBR_UNIT)
                return is_full_length;		/* just use wall clock time */

        inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
        bw = bbr_max_bw(sk);

        /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
         * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
         * small (e.g. on a LAN). We do not persist if packets are lost, since
         * a path with small buffers may not hold that much.
         */
        if (bbr->pacing_gain > BBR_UNIT)
                return is_full_length &&
                        (rs->losses ||  /* perhaps pacing_gain*BDP won't fit */
                         inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));

        /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
         * probing didn't find more bw. If inflight falls to match BDP then we
         * estimate queue is drained; persisting would underutilize the pipe.
         */
        return is_full_length ||
                inflight <= bbr_inflight(sk, bw, BBR_UNIT);
}
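
/* Illustrative phase walk (arbitrary min_rtt of 20ms): the 5/4 phase lasts at
 * least one min_rtt and until inflight reaches 1.25x the estimated BDP (or a
 * loss occurs); the 3/4 phase ends as soon as inflight drains back to 1.0x
 * BDP, or after one min_rtt; each of the six 1.0 phases lasts one min_rtt. So
 * a full cycle takes roughly 8 * 20ms = 160ms when probing finds no extra bw.
 */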

static void bbr_advance_cycle_phase(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
        bbr->cycle_mstamp = tp->delivered_mstamp;
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
static void bbr_update_cycle_phase(struct sock *sk,
                                   const struct rate_sample *rs)
{
        struct bbr *bbr = inet_csk_ca(sk);

        if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
                bbr_advance_cycle_phase(sk);
}

static void bbr_reset_startup_mode(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->mode = BBR_STARTUP;
}

static void bbr_reset_probe_bw_mode(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->mode = BBR_PROBE_BW;
        bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
        bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
}

static void bbr_reset_mode(struct sock *sk)
{
        if (!bbr_full_bw_reached(sk))
                bbr_reset_startup_mode(sk);
        else
                bbr_reset_probe_bw_mode(sk);
}

/* Start a new long-term sampling interval. */
static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
        bbr->lt_last_delivered = tp->delivered;
        bbr->lt_last_lost = tp->lost;
        bbr->lt_rtt_cnt = 0;
}

/* Completely reset long-term bandwidth sampling. */
static void bbr_reset_lt_bw_sampling(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->lt_bw = 0;
        bbr->lt_use_bw = 0;
        bbr->lt_is_sampling = false;
        bbr_reset_lt_bw_sampling_interval(sk);
}

/* Long-term bw sampling interval is done. Estimate whether we're policed. */
static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 diff;

        if (bbr->lt_bw) {  /* do we have bw from a previous interval? */
                /* Is new bw close to the lt_bw from the previous interval? */
                diff = abs(bw - bbr->lt_bw);
                if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
                    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
                     bbr_lt_bw_diff)) {
                        /* All criteria are met; estimate we're policed. */
                        bbr->lt_bw = (bw + bbr->lt_bw) >> 1;  /* avg 2 intvls */
                        bbr->lt_use_bw = 1;
                        bbr->pacing_gain = BBR_UNIT;  /* try to avoid drops */
                        bbr->lt_rtt_cnt = 0;
                        return;
                }
        }
        bbr->lt_bw = bw;
        bbr_reset_lt_bw_sampling_interval(sk);
}

/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
 * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
 * explicitly models their policed rate, to reduce unnecessary losses. We
 * estimate that we're policed if we see 2 consecutive sampling intervals with
 * consistent throughput and high packet loss. If we think we're being policed,
 * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
 */
static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u32 lost, delivered;
        u64 bw;
        u32 t;

        if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
                if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
                    ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
                        bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
                        bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
                }
                return;
        }

        /* Wait for the first loss before sampling, to let the policer exhaust
         * its tokens and estimate the steady-state rate allowed by the policer.
         * Starting samples earlier includes bursts that over-estimate the bw.
         */
        if (!bbr->lt_is_sampling) {
                if (!rs->losses)
                        return;
                bbr_reset_lt_bw_sampling_interval(sk);
                bbr->lt_is_sampling = true;
        }

        /* To avoid underestimates, reset sampling if we run out of data. */
        if (rs->is_app_limited) {
                bbr_reset_lt_bw_sampling(sk);
                return;
        }

        if (bbr->round_start)
                bbr->lt_rtt_cnt++;	/* count round trips in this interval */
        if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
                return;		/* sampling interval needs to be longer */
        if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
                bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
                return;
        }

        /* End sampling interval when a packet is lost, so we estimate the
         * policer tokens were exhausted. Stopping the sampling before the
         * tokens are exhausted under-estimates the policed rate.
         */
        if (!rs->losses)
                return;

        /* Calculate packets lost and delivered in sampling interval. */
        lost = tp->lost - bbr->lt_last_lost;
        delivered = tp->delivered - bbr->lt_last_delivered;
        /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
        if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
                return;

        /* Find average delivery rate in this sampling interval. */
        t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
        if ((s32)t < 1)
                return;		/* interval is less than one ms, so wait */
        /* Check if we can multiply without overflow */
        if (t >= ~0U / USEC_PER_MSEC) {
                bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
                return;
        }
        t *= USEC_PER_MSEC;
        bw = (u64)delivered * BW_UNIT;
        do_div(bw, t);
        bbr_lt_bw_interval_done(sk, bw);
}

/* Estimate the bandwidth based on how fast packets are delivered */
static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        u64 bw;

        bbr->round_start = 0;
        if (rs->delivered < 0 || rs->interval_us <= 0)
                return; /* Not a valid observation */

        /* See if we've reached the next RTT */
        if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
                bbr->next_rtt_delivered = tp->delivered;
                bbr->rtt_cnt++;
                bbr->round_start = 1;
                bbr->packet_conservation = 0;
        }

        bbr_lt_bw_sampling(sk, rs);

        /* Divide delivered by the interval to find a (lower bound) bottleneck
         * bandwidth sample. Delivered is in packets and interval_us in uS and
         * ratio will be <<1 for most connections. So delivered is first scaled.
         */
        bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);

        /* If this sample is application-limited, it is likely to have a very
         * low delivered count that represents application behavior rather than
         * the available network rate. Such a sample could drag down estimated
         * bw, causing needless slow-down. Thus, to continue to send at the
         * last measured network rate, we filter out app-limited samples unless
         * they describe the path bw at least as well as our bw model.
         *
         * So the goal during app-limited phase is to proceed with the best
         * network rate no matter how long. We automatically leave this
         * phase when app writes faster than the network can deliver :)
         */
        if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
                /* Incorporate new sample into our max bw filter. */
                minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
        }
}

/* Estimates the windowed max degree of ack aggregation.
 * This is used to provision extra in-flight data to keep sending during
 * inter-ACK silences.
 *
 * Degree of ack aggregation is estimated as extra data acked beyond expected.
 *
 * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
 * cwnd += max_extra_acked
 *
 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
 * Max filter is an approximate sliding window of 5-10 (packet timed) round
 * trips.
 */
static void bbr_update_ack_aggregation(struct sock *sk,
                                       const struct rate_sample *rs)
{
        u32 epoch_us, expected_acked, extra_acked;
        struct bbr *bbr = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 ||
            rs->delivered < 0 || rs->interval_us <= 0)
                return;

        if (bbr->round_start) {
                bbr->extra_acked_win_rtts = min(0x1F,
                                                bbr->extra_acked_win_rtts + 1);
                if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) {
                        bbr->extra_acked_win_rtts = 0;
                        bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
                                                   0 : 1;
                        bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
                }
        }

        /* Compute how many packets we expected to be delivered over epoch. */
        epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
                                      bbr->ack_epoch_mstamp);
        expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;

        /* Reset the aggregation epoch if the ACK rate is below the expected
         * rate, or if a significantly large number of ACKs has been received
         * since the epoch started (a potentially quite old epoch).
         */
        if (bbr->ack_epoch_acked <= expected_acked ||
            (bbr->ack_epoch_acked + rs->acked_sacked >=
             bbr_ack_epoch_acked_reset_thresh)) {
                bbr->ack_epoch_acked = 0;
                bbr->ack_epoch_mstamp = tp->delivered_mstamp;
                expected_acked = 0;
        }

        /* Compute excess data delivered, beyond what was expected. */
        bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
                                     bbr->ack_epoch_acked + rs->acked_sacked);
        extra_acked = bbr->ack_epoch_acked - expected_acked;
        extra_acked = min(extra_acked, tcp_snd_cwnd(tp));
        if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
                bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
}
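
/* Illustrative aggregation example (arbitrary numbers): if bbr_bw() predicts
 * 100 packets delivered over a 10000 usec epoch but ACKs for 130 packets
 * arrive in a burst (e.g. wifi aggregation), extra_acked records the excess
 * 30 packets; bbr_ack_aggregation_cwnd() then provisions up to that much
 * extra cwnd so sending can continue through the next inter-ACK silence.
 */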

/* Estimate when the pipe is full, using the change in delivery rate: BBR
 * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
 * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
 * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
 * higher rwin, 3: we get higher delivery rate samples. Or transient
 * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
 * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
 */
static void bbr_check_full_bw_reached(struct sock *sk,
                                      const struct rate_sample *rs)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 bw_thresh;

        if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
                return;

        bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
        if (bbr_max_bw(sk) >= bw_thresh) {
                bbr->full_bw = bbr_max_bw(sk);
                bbr->full_bw_cnt = 0;
                return;
        }
        ++bbr->full_bw_cnt;
        bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
}
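
/* Illustrative STARTUP exit trace (arbitrary bw units): suppose the max bw
 * filter reads 100, 126, 130, 131, 132 at successive round starts. 126 >=
 * 100 * 5/4 = 125 refreshes full_bw and zeroes the count; 130, 131, and 132
 * each fail to reach 126 * 5/4 = 157, so after those 3 flat rounds
 * full_bw_reached is set and bbr_check_drain() switches the flow to DRAIN.
 */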

/* If pipe is probably full, drain the queue and then enter steady-state. */
static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
{
        struct bbr *bbr = inet_csk_ca(sk);

        if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
                bbr->mode = BBR_DRAIN;	/* drain queue we created */
                tcp_sk(sk)->snd_ssthresh =
                                bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
        }	/* fall through to check if in-flight is already small: */
        if (bbr->mode == BBR_DRAIN &&
            bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
            bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
                bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
}

static void bbr_check_probe_rtt_done(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        if (!(bbr->probe_rtt_done_stamp &&
              after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
                return;

        bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
        tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
        bbr_reset_mode(sk);
}

/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
 * periodically drain the bottleneck queue, to converge to measure the true
 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
 * small (reducing queuing delay and packet loss) and achieve fairness among
 * BBR flows.
 *
 * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
 * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
 * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
 * re-enter the previous mode. BBR uses 200ms to approximately bound the
 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
 *
 * Note that flows need only pay 2% if they are busy sending over the last 10
 * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
 * natural silences or low-rate periods within 10 seconds where the rate is low
 * enough for long enough to drain its queue in the bottleneck. We pick up
 * these min RTT measurements opportunistically with our min_rtt filter. :-)
 */
static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
        bool filter_expired;

        /* Track min RTT seen in the min_rtt_win_sec filter window: */
        filter_expired = after(tcp_jiffies32,
                               bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
        if (rs->rtt_us >= 0 &&
            (rs->rtt_us < bbr->min_rtt_us ||
             (filter_expired && !rs->is_ack_delayed))) {
                bbr->min_rtt_us = rs->rtt_us;
                bbr->min_rtt_stamp = tcp_jiffies32;
        }

        if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
            !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
                bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
                bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
                bbr->probe_rtt_done_stamp = 0;
        }

        if (bbr->mode == BBR_PROBE_RTT) {
                /* Ignore low rate samples during this mode. */
                tp->app_limited =
                        (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
                /* Maintain min packets in flight for max(200 ms, 1 round). */
                if (!bbr->probe_rtt_done_stamp &&
                    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
                        bbr->probe_rtt_done_stamp = tcp_jiffies32 +
                                msecs_to_jiffies(bbr_probe_rtt_mode_ms);
                        bbr->probe_rtt_round_done = 0;
                        bbr->next_rtt_delivered = tp->delivered;
                } else if (bbr->probe_rtt_done_stamp) {
                        if (bbr->round_start)
                                bbr->probe_rtt_round_done = 1;
                        if (bbr->probe_rtt_round_done)
                                bbr_check_probe_rtt_done(sk);
                }
        }
        /* Restart after idle ends only once we process a new S/ACK for data */
        if (rs->delivered > 0)
                bbr->idle_restart = 0;
}

static void bbr_update_gains(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        switch (bbr->mode) {
        case BBR_STARTUP:
                bbr->pacing_gain = bbr_high_gain;
                bbr->cwnd_gain	 = bbr_high_gain;
                break;
        case BBR_DRAIN:
                bbr->pacing_gain = bbr_drain_gain;	/* slow, to drain */
                bbr->cwnd_gain	 = bbr_high_gain;	/* keep cwnd */
                break;
        case BBR_PROBE_BW:
                bbr->pacing_gain = (bbr->lt_use_bw ?
                                    BBR_UNIT :
                                    bbr_pacing_gain[bbr->cycle_idx]);
                bbr->cwnd_gain	 = bbr_cwnd_gain;
                break;
        case BBR_PROBE_RTT:
                bbr->pacing_gain = BBR_UNIT;
                bbr->cwnd_gain	 = BBR_UNIT;
                break;
        default:
                WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
                break;
        }
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
{
        bbr_update_bw(sk, rs);
        bbr_update_ack_aggregation(sk, rs);
        bbr_update_cycle_phase(sk, rs);
        bbr_check_full_bw_reached(sk, rs);
        bbr_check_drain(sk, rs);
        bbr_update_min_rtt(sk, rs);
        bbr_update_gains(sk);
}

static void bbr_main(struct sock *sk, const struct rate_sample *rs)
{
        struct bbr *bbr = inet_csk_ca(sk);
        u32 bw;

        bbr_update_model(sk, rs);

        bw = bbr_bw(sk);
        bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
        bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
}

static void bbr_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->prior_cwnd = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        bbr->rtt_cnt = 0;
        bbr->next_rtt_delivered = tp->delivered;
        bbr->prev_ca_state = TCP_CA_Open;
        bbr->packet_conservation = 0;

        bbr->probe_rtt_done_stamp = 0;
        bbr->probe_rtt_round_done = 0;
        bbr->min_rtt_us = tcp_min_rtt(tp);
        bbr->min_rtt_stamp = tcp_jiffies32;

        minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */

        bbr->has_seen_rtt = 0;
        bbr_init_pacing_rate_from_rtt(sk);

        bbr->round_start = 0;
        bbr->idle_restart = 0;
        bbr->full_bw_reached = 0;
        bbr->full_bw = 0;
        bbr->full_bw_cnt = 0;
        bbr->cycle_mstamp = 0;
        bbr->cycle_idx = 0;
        bbr_reset_lt_bw_sampling(sk);
        bbr_reset_startup_mode(sk);

        bbr->ack_epoch_mstamp = tp->tcp_mstamp;
        bbr->ack_epoch_acked = 0;
        bbr->extra_acked_win_rtts = 0;
        bbr->extra_acked_win_idx = 0;
        bbr->extra_acked[0] = 0;
        bbr->extra_acked[1] = 0;

        cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
}

static u32 bbr_sndbuf_expand(struct sock *sk)
{
        /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
        return 3;
}

/* In theory BBR does not need to undo the cwnd since it does not
 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
 */
static u32 bbr_undo_cwnd(struct sock *sk)
{
        struct bbr *bbr = inet_csk_ca(sk);

        bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
        bbr->full_bw_cnt = 0;
        bbr_reset_lt_bw_sampling(sk);
        return tcp_snd_cwnd(tcp_sk(sk));
}

/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
static u32 bbr_ssthresh(struct sock *sk)
{
        bbr_save_cwnd(sk);
        return tcp_sk(sk)->snd_ssthresh;
}

static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info)
{
        if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
            ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                struct tcp_sock *tp = tcp_sk(sk);
                struct bbr *bbr = inet_csk_ca(sk);
                u64 bw = bbr_bw(sk);

                bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
                memset(&info->bbr, 0, sizeof(info->bbr));
                info->bbr.bbr_bw_lo		= (u32)bw;
                info->bbr.bbr_bw_hi		= (u32)(bw >> 32);
                info->bbr.bbr_min_rtt		= bbr->min_rtt_us;
                info->bbr.bbr_pacing_gain	= bbr->pacing_gain;
                info->bbr.bbr_cwnd_gain		= bbr->cwnd_gain;
                *attr = INET_DIAG_BBRINFO;
                return sizeof(info->bbr);
        }
        return 0;
}

static void bbr_set_state(struct sock *sk, u8 new_state)
{
        struct bbr *bbr = inet_csk_ca(sk);

        if (new_state == TCP_CA_Loss) {
                struct rate_sample rs = { .losses = 1 };

                bbr->prev_ca_state = TCP_CA_Loss;
                bbr->full_bw = 0;
                bbr->round_start = 1;	/* treat RTO like end of a round */
                bbr_lt_bw_sampling(sk, &rs);
        }
}

static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
        .flags		= TCP_CONG_NON_RESTRICTED,
        .name		= "bbr",
        .owner		= THIS_MODULE,
        .init		= bbr_init,
        .cong_control	= bbr_main,
        .sndbuf_expand	= bbr_sndbuf_expand,
        .undo_cwnd	= bbr_undo_cwnd,
        .cwnd_event	= bbr_cwnd_event,
        .ssthresh	= bbr_ssthresh,
        .min_tso_segs	= bbr_min_tso_segs,
        .get_info	= bbr_get_info,
        .set_state	= bbr_set_state,
};

BTF_SET8_START(tcp_bbr_check_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID_FLAGS(func, bbr_init)
BTF_ID_FLAGS(func, bbr_main)
BTF_ID_FLAGS(func, bbr_sndbuf_expand)
BTF_ID_FLAGS(func, bbr_undo_cwnd)
BTF_ID_FLAGS(func, bbr_cwnd_event)
BTF_ID_FLAGS(func, bbr_ssthresh)
BTF_ID_FLAGS(func, bbr_min_tso_segs)
BTF_ID_FLAGS(func, bbr_set_state)
#endif
#endif
BTF_SET8_END(tcp_bbr_check_kfunc_ids)

static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = {
        .owner = THIS_MODULE,
        .set   = &tcp_bbr_check_kfunc_ids,
};

static int __init bbr_register(void)
{
        int ret;

        BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);

        ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_bbr_kfunc_set);
        if (ret < 0)
                return ret;
        return tcp_register_congestion_control(&tcp_bbr_cong_ops);
}

static void __exit bbr_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
}

module_init(bbr_register);
module_exit(bbr_unregister);

MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");