/*
 *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
 *
 *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
 *
 *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This implementation should follow RFC 4341
 */
#include <linux/slab.h>
#include "../feat.h"
#include "ccid2.h"


/*
 * Debug tracing: ccid2_pr_debug() expands to a real trace statement only when
 * CONFIG_IP_DCCP_CCID2_DEBUG is set AND the 'ccid2_debug' module parameter is
 * enabled at runtime; otherwise it compiles away to nothing.
 */
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static bool ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
#endif
/*
 * ccid2_hc_tx_alloc_seq  -  Grow the circular TX packet-history list
 * Allocates one more chunk of CCID2_SEQBUF_LEN ccid2_seq entries, links it
 * into the existing circular list (or starts a new one on first call), and
 * records the chunk pointer in hc->tx_seqbuf[] so it can be freed on exit.
 * Returns 0 on success, -ENOMEM if the chunk table is full or kmalloc fails.
 */
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
{
	struct ccid2_seq *seqp;
	int i;

	/* check if we have space to preserve the pointer to the buffer */
	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
			       sizeof(struct ccid2_seq *)))
		return -ENOMEM;

	/* allocate buffer and initialize linked list */
	seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
			     gfp_any());
	if (seqp == NULL)
		return -ENOMEM;

	/* chain the new entries forward/backward, then close the ring */
	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	}
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* This is the first allocation.  Initiate the head and tail.  */
	if (hc->tx_seqbufc == 0)
		hc->tx_seqh = hc->tx_seqt = seqp;
	else {
		/* link the existing list with the one we just created */
		hc->tx_seqh->ccid2s_next = seqp;
		seqp->ccid2s_prev = hc->tx_seqh;

		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
	}

	/* store the original pointer to the buffer so we can free it */
	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
	hc->tx_seqbufc++;

	return 0;
}
79 | ||
6b57c93d | 80 | static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) |
2a91aa39 | 81 | { |
1c0e0a05 GR |
82 | if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) |
83 | return CCID_PACKET_WILL_DEQUEUE_LATER; | |
84 | return CCID_PACKET_SEND_AT_ONCE; | |
2a91aa39 AB |
85 | } |
86 | ||
/*
 * ccid2_change_l_ack_ratio  -  Signal a new local Ack Ratio to the peer
 * Clamps @val into [1, min(ceil(cwnd/2), DCCPF_ACK_RATIO_MAX)] before handing
 * it to the feature-negotiation code.
 */
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
		val = max_ratio;
	}
	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
}
d50ad163 | 104 | |
static void ccid2_check_l_ack_ratio(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/*
	 * After a loss, idle period, application limited period, or RTO we
	 * need to check that the ack ratio is still less than the congestion
	 * window. Otherwise, we will send an entire congestion window of
	 * packets and got no response because we haven't sent ack ratio
	 * packets yet.
	 * If the ack ratio does need to be reduced, we reduce it to half of
	 * the congestion window (or 1 if that's zero) instead of to the
	 * congestion window. This prevents problems if one ack is lost.
	 */
	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
}
122 | ||
d346d886 SJ |
123 | static void ccid2_change_l_seq_window(struct sock *sk, u64 val) |
124 | { | |
125 | dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW, | |
126 | clamp_val(val, DCCPF_SEQ_WMIN, | |
127 | DCCPF_SEQ_WMAX)); | |
2a91aa39 AB |
128 | } |
129 | ||
a8d7aa17 ED |
130 | static void dccp_tasklet_schedule(struct sock *sk) |
131 | { | |
132 | struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet; | |
133 | ||
134 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { | |
135 | sock_hold(sk); | |
136 | __tasklet_schedule(t); | |
137 | } | |
138 | } | |
139 | ||
/*
 * ccid2_hc_tx_rto_expire  -  Retransmission-timeout handler
 * Runs in timer (softirq) context. If the socket is owned by user context the
 * work is deferred by re-arming the timer for HZ/5. Otherwise the RTO is
 * backed off (capped at DCCP_RTO_MAX), cwnd/ssthresh/pipe are reset as for a
 * TCP timeout, the packet history and ack-ratio state are cleared, and the
 * timer is restarted. Drops the socket reference taken when the timer was set.
 */
static void ccid2_hc_tx_rto_expire(struct timer_list *t)
{
	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
	struct sock *sk = hc->sk;
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* user context holds the lock: retry shortly */
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	/* back-off timer */
	hc->tx_rto <<= 1;
	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;

	/* adjust pipe, cwnd etc */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)
		hc->tx_ssthresh = 2;
	hc->tx_cwnd = 1;
	hc->tx_pipe = 0;

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear ack ratio state. */
	hc->tx_rpseq    = 0;
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		dccp_tasklet_schedule(sk);
	/* restart backed-off timer */
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
187 | ||
/*
 * Congestion window validation (RFC 2861).
 * Runtime-tunable via /sys/module: enabled by default.
 */
static bool ccid2_do_cwv = true;
module_param(ccid2_do_cwv, bool, 0644);
MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
194 | ||
/**
 * ccid2_update_used_window  -  Track how much of cwnd is actually used
 * @hc:      socket to update
 * @new_wnd: latest sample of the window actually in use
 *
 * This is done in addition to CWV. The sender needs to have an idea of how many
 * packets may be in flight, to set the local Sequence Window value accordingly
 * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
 * maximum-used window. We use an EWMA low-pass filter to filter out noise.
 */
static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
{
	/* integer EWMA with gain 1/4: expected = 3/4*expected + 1/4*sample */
	hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
}
206 | ||
/*
 * ccid2_cwnd_application_limited  -  Shrink an under-used congestion window
 * This borrows the code of tcp_cwnd_application_limited(): when the
 * application has not been filling cwnd for a full RTO, move cwnd halfway
 * towards the window actually used, raising ssthresh to 3/4 of the old cwnd.
 */
static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	/* don't reduce cwnd below the initial window (IW) */
	u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
	    win_used = max(hc->tx_cwnd_used, init_win);

	if (win_used < hc->tx_cwnd) {
		hc->tx_ssthresh = max(hc->tx_ssthresh,
				     (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
		hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
	}
	hc->tx_cwnd_used  = 0;
	hc->tx_cwnd_stamp = now;

	/* cwnd shrank: the Ack Ratio may now exceed it and must be re-checked */
	ccid2_check_l_ack_ratio(sk);
}
225 | ||
/*
 * ccid2_cwnd_restart  -  Slow-start restart after an idle period
 * This borrows the code of tcp_cwnd_restart(): halve cwnd once per elapsed
 * RTO of idleness, but never below the RFC 3390 initial window.
 */
static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	u32 cwnd = hc->tx_cwnd, restart_cwnd,
	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);

	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));

	/* don't reduce cwnd below the initial window (IW) */
	restart_cwnd = min(cwnd, iwnd);
	cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
	hc->tx_cwnd = max(cwnd, restart_cwnd);

	hc->tx_cwnd_stamp = now;
	hc->tx_cwnd_used  = 0;

	/* cwnd may have shrunk: re-validate the Ack Ratio against it */
	ccid2_check_l_ack_ratio(sk);
}
245 | ||
/*
 * ccid2_hc_tx_packet_sent  -  Per-packet TX bookkeeping
 * Called after each data packet is sent: applies CWV / idle-restart logic,
 * updates pipe and the used-window estimate, records the packet in the
 * circular history (growing it if needed), and (re)arms the RTO timer.
 */
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	const u32 now = ccid2_jiffies32;
	struct ccid2_seq *next;

	/* slow-start after idle periods (RFC 2581, RFC 2861) */
	if (ccid2_do_cwv && !hc->tx_pipe &&
	    (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
		ccid2_cwnd_restart(sk, now);

	hc->tx_lsndtime = now;
	hc->tx_pipe    += 1;

	/* see whether cwnd was fully used (RFC 2861), update expected window */
	if (ccid2_cwnd_network_limited(hc)) {
		ccid2_update_used_window(hc, hc->tx_cwnd);
		hc->tx_cwnd_used  = 0;
		hc->tx_cwnd_stamp = now;
	} else {
		if (hc->tx_pipe > hc->tx_cwnd_used)
			hc->tx_cwnd_used = hc->tx_pipe;

		ccid2_update_used_window(hc, hc->tx_cwnd_used);

		if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
			ccid2_cwnd_application_limited(sk, now);
	}

	/* record this packet at the history head, then advance the head */
	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent  = now;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
			return;
		}
		next = hc->tx_seqh->ccid2s_next;
		BUG_ON(next == hc->tx_seqt);
	}
	hc->tx_seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */
#if 0
	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
	hc->tx_arsent++;
	/* We had an ack loss in this window... */
	if (hc->tx_ackloss) {
		if (hc->tx_arsent >= hc->tx_cwnd) {
			hc->tx_arsent  = 0;
			hc->tx_ackloss = 0;
		}
	} else {
		/* No acks lost up to now... */
		/* decrease ack ratio if enough packets were sent */
		if (dp->dccps_l_ack_ratio > 1) {
			/* XXX don't calculate denominator each time */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hc->tx_cwnd * hc->tx_cwnd / denom;

			if (hc->tx_arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hc->tx_arsent = 0;
			}
		} else {
			/* we can't increase ack ratio further [1] */
			hc->tx_arsent = 0; /* or maybe set it to cwnd*/
		}
	}
#endif

	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	do {
		struct ccid2_seq *seqp = hc->tx_seqt;

		while (seqp != hc->tx_seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
#endif
}
360 | ||
/**
 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
 * @sk:   socket owning the CCID-2 TX state
 * @mrtt: measured RTT sample (jiffies); 0 is replaced by 1 below
 *
 * This code is almost identical with TCP's tcp_rtt_estimator(), since
 * - it has a higher sampling frequency (recommended by RFC 1323),
 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
 * - it is simple (cf. more complex proposals such as Eifel timer or research
 *   which suggests that the gain should be set according to window size),
 * - in tests it was found to work well with CCID2 [gerrit].
 */
static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	long m = mrtt ? : 1;

	if (hc->tx_srtt == 0) {
		/* First measurement m */
		hc->tx_srtt = m << 3;		/* SRTT kept scaled by 8 */
		hc->tx_mdev = m << 1;		/* mdev kept scaled by 4 */

		hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
		hc->tx_rttvar   = hc->tx_mdev_max;

		hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
	} else {
		/* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
		m -= (hc->tx_srtt >> 3);
		hc->tx_srtt += m;

		/* Similarly, update scaled mdev with regard to |m| */
		if (m < 0) {
			m = -m;
			m -= (hc->tx_mdev >> 2);
			/*
			 * This neutralises RTO increase when RTT < SRTT - mdev
			 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
			 * in Linux TCP", USENIX 2002, pp. 49-62).
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (hc->tx_mdev >> 2);
		}
		hc->tx_mdev += m;

		if (hc->tx_mdev > hc->tx_mdev_max) {
			hc->tx_mdev_max = hc->tx_mdev;
			if (hc->tx_mdev_max > hc->tx_rttvar)
				hc->tx_rttvar = hc->tx_mdev_max;
		}

		/*
		 * Decay RTTVAR at most once per flight, exploiting that
		 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
		 *  2) AWL = GSS-W+1 <= GAR <= GSS          (RFC 4340, 7.5.1)
		 * GAR is a useful bound for FlightSize = pipe.
		 * AWL is probably too low here, as it over-estimates pipe.
		 */
		if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
			if (hc->tx_mdev_max < hc->tx_rttvar)
				hc->tx_rttvar -= (hc->tx_rttvar -
						  hc->tx_mdev_max) >> 2;
			hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
			hc->tx_mdev_max = tcp_rto_min(sk);
		}
	}

	/*
	 * Set RTO from SRTT and RTTVAR
	 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
	 * This agrees with RFC 4341, 5:
	 *	"Because DCCP does not retransmit data, DCCP does not require
	 *	 TCP's recommended minimum timeout of one second".
	 */
	hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;

	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;
}
439 | ||
/*
 * ccid2_new_ack  -  Process one newly acknowledged packet
 * Grows cwnd (slow start or congestion avoidance, limited by both sequence
 * windows), adjusts Ack Ratio / Sequence Window towards ~5x the packets in
 * flight (RFC 4340, 7.5.2), and feeds an RTT sample to the estimator.
 */
static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
			  unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;

	/* only grow cwnd while both sequence windows leave headroom */
	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
	    r_seq_used < dp->dccps_r_seq_win) {
		if (hc->tx_cwnd < hc->tx_ssthresh) {
			/* slow start: at most *maxincr increments per Ack */
			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
				hc->tx_cwnd += 1;
				*maxincr    -= 1;
				hc->tx_packets_acked = 0;
			}
		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
			/* congestion avoidance: +1 per cwnd acked packets */
			hc->tx_cwnd += 1;
			hc->tx_packets_acked = 0;
		}
	}

	/*
	 * Adjust the local sequence window and the ack ratio to allow about
	 * 5 times the number of packets in the network (RFC 4340 7.5.2)
	 */
	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);

	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);

	/*
	 * FIXME: RTT is sampled several times per acknowledgment (for each
	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
	 * This causes the RTT to be over-estimated, since the older entries
	 * in the Ack Vector have earlier sending times.
	 * The cleanest solution is to not use the ccid2s_sent field at all
	 * and instead use DCCP timestamps: requires changes in other places.
	 */
	ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
}
485 | ||
/*
 * ccid2_congestion_event  -  React to loss or ECN mark
 * Halves cwnd (minimum 1) and sets ssthresh accordingly; multiple congestion
 * signals within one RTT (judged by the packet's send time vs. the last
 * congestion event) are treated as a single event.
 */
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
		return;
	}

	hc->tx_last_cong = ccid2_jiffies32;

	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);

	/* cwnd was halved: make sure the Ack Ratio still fits under it */
	ccid2_check_l_ack_ratio(sk);
}
502 | ||
7e87fe84 GR |
503 | static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type, |
504 | u8 option, u8 *optval, u8 optlen) | |
505 | { | |
506 | struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); | |
507 | ||
508 | switch (option) { | |
509 | case DCCPO_ACK_VECTOR_0: | |
510 | case DCCPO_ACK_VECTOR_1: | |
511 | return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen, | |
512 | option - DCCPO_ACK_VECTOR_0); | |
513 | } | |
514 | return 0; | |
515 | } | |
516 | ||
/*
 * ccid2_hc_tx_packet_recv  -  Main ACK-processing routine
 * Tracks reverse-path congestion via duplicate-Ack counting, then walks the
 * parsed Ack Vector chunks against the circular packet history: newly acked
 * packets feed ccid2_new_ack(), ECN-marked ones trigger a congestion event.
 * Afterwards NEWRENO-style dupack detection marks older unacked packets lost,
 * the acked tail of the history is trimmed, and the RTO timer is stopped or
 * re-armed depending on whether data is still in flight.
 */
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
	struct dccp_ackvec_parsed *avp;
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	int done = 0;
	unsigned int maxincr = 0;

	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
	 * -sorbo.
	 */
	/* need to bootstrap */
	if (hc->tx_rpdupack == -1) {
		hc->tx_rpdupack = 0;
		hc->tx_rpseq    = seqno;
	} else {
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
			hc->tx_rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hc->tx_rpseq)) {
			hc->tx_rpdupack++;

			/* check if we got enough dupacks */
			if (hc->tx_rpdupack >= NUMDUPACK) {
				hc->tx_rpdupack = -1; /* XXX lame */
				hc->tx_rpseq    = 0;
#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
				/*
				 * FIXME: Ack Congestion Control is broken; in
				 * the current state instabilities occurred with
				 * Ack Ratios greater than 1; causing hang-ups
				 * and long RTO timeouts. This needs to be fixed
				 * before opening up dynamic changes. -- gerrit
				 */
				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
#endif
			}
		}
	}

	/* check forward path congestion */
	if (dccp_packet_without_ack(skb))
		return;

	/* still didn't send out new data packets */
	if (hc->tx_seqh == hc->tx_seqt)
		goto done;

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hc->tx_high_ack))
		hc->tx_high_ack = ackno;

	/* position seqp at the first history entry not below ackno */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hc->tx_cwnd < hc->tx_ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all ack vectors */
	list_for_each_entry(avp, &hc->tx_av_chunks, node) {
		/* go through this ack vector */
		for (; avp->len--; avp->vec++) {
			u64 ackno_end_rl = SUB48(ackno,
						 dccp_ackvec_runlen(avp->vec));

			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
				       (unsigned long long)ackno,
				       dccp_ackvec_state(avp->vec) >> 6,
				       dccp_ackvec_runlen(avp->vec));
			/* if the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
				const u8 state = dccp_ackvec_state(avp->vec);

				/* new packet received or marked */
				if (state != DCCPAV_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state == DCCPAV_ECN_MARKED)
						ccid2_congestion_event(sk,
								       seqp);
					else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					hc->tx_pipe--;
				}
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			ackno = SUB48(ackno_end_rl, 1);
		}
		if (done)
			break;
	}

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hc->tx_seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				/* XXX need to traverse from tail -> head in
				 * order to detect multiple congestion events in
				 * one ack vector.
				 */
				ccid2_congestion_event(sk, seqp);
				hc->tx_pipe--;
			}
			if (seqp == hc->tx_seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hc->tx_seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hc->tx_seqt != hc->tx_seqh) {
		if (!hc->tx_seqt->ccid2s_acked)
			break;

		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
	}

	/* restart RTO timer if not all outstanding data has been acked */
	if (hc->tx_pipe == 0)
		sk_stop_timer(sk, &hc->tx_rtotimer);
	else
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
done:
	/* check if incoming Acks allow pending packets to be sent */
	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
		dccp_tasklet_schedule(sk);
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
723 | ||
/*
 * ccid2_hc_tx_init  -  Initialise CCID-2 TX half-connection state
 * Sets up initial cwnd/ssthresh (RFC 4341, 5), validates the Ack Ratio,
 * allocates the first packet-history chunk, and arms timers/lists.
 * Returns 0 on success or -ENOMEM.
 */
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio;

	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
	hc->tx_ssthresh = ~0U;

	/* Use larger initial windows (RFC 4341, section 5). */
	hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
	hc->tx_expected_wnd = hc->tx_cwnd;

	/* Make sure that Ack Ratio is enabled and within bounds. */
	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
		dp->dccps_l_ack_ratio = max_ratio;

	/* XXX init ~ to window size... */
	if (ccid2_hc_tx_alloc_seq(hc))
		return -ENOMEM;

	hc->tx_rto	 = DCCP_TIMEOUT_INIT;
	hc->tx_rpdupack  = -1;
	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
	hc->tx_cwnd_used = 0;
	hc->sk		 = sk;
	timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0);
	INIT_LIST_HEAD(&hc->tx_av_chunks);
	return 0;
}
755 | ||
/*
 * ccid2_hc_tx_exit  -  Tear down CCID-2 TX state
 * Stops the RTO timer, frees every packet-history chunk recorded in
 * tx_seqbuf[], and releases any unprocessed Ack Vector chunks.
 */
static void ccid2_hc_tx_exit(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	int i;

	sk_stop_timer(sk, &hc->tx_rtotimer);

	for (i = 0; i < hc->tx_seqbufc; i++)
		kfree(hc->tx_seqbuf[i]);
	hc->tx_seqbufc = 0;
	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
768 | ||
769 | static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) | |
770 | { | |
77d2dd93 | 771 | struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk); |
2a91aa39 | 772 | |
58fdea0f GR |
773 | if (!dccp_data_packet(skb)) |
774 | return; | |
775 | ||
776 | if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) { | |
777 | dccp_send_ack(sk); | |
778 | hc->rx_num_data_pkts = 0; | |
2a91aa39 AB |
779 | } |
780 | } | |
781 | ||
/* Operations table registering CCID-2 ("TCP-like") with the DCCP core. */
struct ccid_operations ccid2_ops = {
	.ccid_id		  = DCCPC_CCID2,
	.ccid_name		  = "TCP-like",
	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
};
795 | ||
/* Expose the debug switch as a module parameter only in debug builds. */
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
#endif