Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
96b2d69b | 2 | /* Processing of received RxRPC packets |
17926a79 | 3 | * |
96b2d69b | 4 | * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. |
17926a79 | 5 | * Written by David Howells (dhowells@redhat.com) |
17926a79 DH |
6 | */ |
7 | ||
9b6d5398 JP |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | ||
17926a79 DH |
10 | #include "ar-internal.h" |
11 | ||
248f219c DH |
/*
 * Abort a call due to a protocol error, tagging the abort with the offending
 * sequence number.  Passes RX_PROTOCOL_ERROR as the abort code and -EBADMSG
 * as the local error to rxrpc_abort_call().
 */
static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG);
}
17 | ||
57494343 DH |
/*
 * Do TCP-style congestion management [RFC 5681].
 *
 * Driven on receipt of an ACK: updates the congestion window (cwnd), the
 * slow-start threshold (ssthresh) and the congestion-mode state machine
 * recorded on @call, and may kick off a resend of unacked data.
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	/* Packets still in flight = unacked Tx window minus what this ACK
	 * soft-acked.
	 */
	summary->flight_size =
		(call->tx_top - call->acks_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		/* Retransmission timeout: collapse the window and halve the
		 * threshold (floor of 2) per RFC 5681 §3.1.
		 */
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		/* NOTE(review): ssthresh is clamped to >= 2 and cwnd was just
		 * set to 1, so this condition looks unreachable — confirm.
		 */
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	/* Saturating 8-bit count of ACKs seen this RTT window. */
	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	/* Snapshot the pre-update state into the summary for tracing. */
	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->saw_nacks)
			goto packet_loss_detected;
		/* Grow exponentially: +1 per ACK until we hit ssthresh. */
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->saw_nacks)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_count == 0)
			goto out;
		/* srtt_us appears to be scaled by 8 (TCP-style); >> 3 yields
		 * the smoothed RTT in usec — TODO confirm against peer code.
		 */
		if (ktime_before(skb->tstamp,
				 ktime_add_us(call->cong_tstamp,
					      call->peer->srtt_us >> 3)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (!summary->saw_nacks)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		/* Three duplicate ACKs trigger fast retransmit. */
		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			/* Retransmit again after two further dup ACKs. */
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			/* The retransmission got through: deflate the window
			 * back to ssthresh.
			 */
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (!summary->saw_nacks)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	/* All NAKs cleared: return to slow start or congestion avoidance. */
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	/* Clamp the window to the Tx buffer ring size. */
	if (cwnd >= RXRPC_TX_MAX_WINDOW)
		cwnd = RXRPC_TX_MAX_WINDOW;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend)
		rxrpc_resend(call, skb);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
	    summary->nr_acks != call->tx_top - call->acks_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
176 | ||
5086d9a9 DH |
/*
 * Degrade the congestion window if we haven't transmitted a packet for >1RTT.
 *
 * Only applies in the slow-start and congestion-avoidance modes, and is
 * skipped while a client call is awaiting its reply (nothing to transmit).
 */
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{
	ktime_t rtt, now;

	if (call->cong_mode != RXRPC_CALL_SLOW_START &&
	    call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
		return;
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		return;

	/* srtt_us appears to be scaled by 8; * (1000 / 8) converts it to
	 * nanoseconds — TODO confirm scaling against the RTT tracker.
	 */
	rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
	now = ktime_get_real();
	if (!ktime_before(ktime_add(call->tx_last_sent, rtt), now))
		return;		/* Transmitted within the last RTT. */

	trace_rxrpc_reset_cwnd(call, now);
	rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
	call->tx_last_sent = now;
	call->cong_mode = RXRPC_CALL_SLOW_START;
	/* Keep ssthresh at no less than 3/4 of the old window, and halve the
	 * window itself (floor of RXRPC_MIN_CWND).
	 */
	call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
				    call->cong_cwnd * 3 / 4);
	call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
}
203 | ||
/*
 * Apply a hard ACK by advancing the Tx window.
 *
 * Walks the Tx buffer list up to and including @to, counting newly rotated
 * acks into @summary, and publishes the new hard-ack point.  Returns true if
 * the last packet of the phase was rotated out (RXRPC_CALL_TX_LAST).
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct rxrpc_txbuf *txb;
	bool rot_last = false;

	/* NOTE(review): the 4th argument is the RCU lockdep condition; it is
	 * passed as false here — confirm the intended locking annotation.
	 */
	list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
		if (before_eq(txb->seq, call->acks_hard_ack))
			continue;	/* Already hard-acked. */
		summary->nr_rot_new_acks++;
		if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}
		if (txb->seq == to)
			break;
	}

	if (rot_last)
		set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);

	_enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);

	/* Track the lowest NAK'd sequence; flag a new low NAK if the point
	 * moved forwards past the previous one.
	 */
	if (call->acks_lowest_nak == call->acks_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (after(to, call->acks_lowest_nak)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	/* Publish the new hard-ack point after the list updates above. */
	smp_store_release(&call->acks_hard_ack, to);

	trace_rxrpc_txqueue(call, (rot_last ?
				   rxrpc_txqueue_rotate_last :
				   rxrpc_txqueue_rotate));
	wake_up(&call->waitq);
	return rot_last;
}
17926a79 | 245 | |
248f219c DH |
/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 *
 * Returns true on a valid state transition; aborts the call and returns
 * false if the call was in a state that cannot end its Tx phase.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	unsigned int state;

	/* The caller must have rotated out the last Tx packet already. */
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	state = call->state;
	switch (state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* Client: move on to receiving (or awaiting) the reply. */
		if (reply_begun)
			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		/* Server: the final ACK completes the call. */
		__rxrpc_call_completed(call);
		state = call->state;
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
294 | ||
/*
 * Begin the reply reception phase of a call.
 *
 * Cancels the resend/delayed-ACK timers, rotates out any remaining Tx
 * packets (the reply implicitly hard-acks them) and ends the Tx phase.
 * Returns false if the rotation failed and the call was aborted.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		/* Push the timers out to effectively-never. */
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->delay_ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort("TXL", call, top);
			return false;
		}
	}
	return rxrpc_end_tx_phase(call, true, "ETD");
}
320 | ||
5d7edbc9 DH |
321 | static void rxrpc_input_update_ack_window(struct rxrpc_call *call, |
322 | rxrpc_seq_t window, rxrpc_seq_t wtop) | |
323 | { | |
324 | atomic64_set_release(&call->ackr_window, ((u64)wtop) << 32 | window); | |
325 | } | |
326 | ||
/*
 * Push a DATA packet onto the Rx queue.
 *
 * Appends @skb to the recvmsg queue and publishes the advanced ACK window.
 * Caller holds the recvmsg queue lock and has already taken a ref on @skb.
 */
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
				   rxrpc_seq_t window, rxrpc_seq_t wtop,
				   enum rxrpc_receive_trace why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;

	__skb_queue_tail(&call->recvmsg_queue, skb);
	rxrpc_input_update_ack_window(call, window, wtop);

	/* why + 1 is assumed to be the "last packet" variant of the trace
	 * point — TODO confirm against the trace enum ordering.
	 */
	trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
}
342 | ||
/*
 * Process a DATA packet.
 *
 * Checks the packet against the Rx window, queues it for recvmsg (either
 * directly, if it's the next expected packet, or on the out-of-sequence
 * queue), drains any out-of-sequence packets that the new window now covers,
 * and sends or proposes an ACK as appropriate.  *_notify is set if the
 * caller should notify the socket.
 */
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
				 bool *_notify)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sk_buff *oos;
	rxrpc_serial_t serial = sp->hdr.serial;
	u64 win = atomic64_read(&call->ackr_window);
	rxrpc_seq_t window = lower_32_bits(win);
	rxrpc_seq_t wtop = upper_32_bits(win);
	rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
	rxrpc_seq_t seq = sp->hdr.seq;
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
	int ack_reason = -1;	/* -1: no immediate ACK; propose delayed. */

	rxrpc_inc_stat(call->rxnet, stat_rx_data);
	if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
	if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);

	if (last) {
		/* A second "last" packet must carry the same final seq. */
		if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq + 1 != wtop) {
			rxrpc_proto_abort("LSN", call, seq);
			return;
		}
	} else {
		/* No non-last packet may lie at or beyond the last one. */
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, wtop)) {
			pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
				call->debug_id, seq, window, wtop, wlimit);
			rxrpc_proto_abort("LSA", call, seq);
			return;
		}
	}

	if (after(seq, call->rx_highest_seq))
		call->rx_highest_seq = seq;

	trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);

	if (before(seq, window)) {
		ack_reason = RXRPC_ACK_DUPLICATE;	/* Already consumed. */
		goto send_ack;
	}
	if (after(seq, wlimit)) {
		ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;	/* Beyond Rx window. */
		goto send_ack;
	}

	/* Queue the packet. */
	if (seq == window) {
		/* Exactly the next expected packet. */
		rxrpc_seq_t reset_from;
		bool reset_sack = false;

		if (sp->hdr.flags & RXRPC_REQUEST_ACK)
			ack_reason = RXRPC_ACK_REQUESTED;
		/* Send an immediate ACK if we fill in a hole */
		else if (!skb_queue_empty(&call->rx_oos_queue))
			ack_reason = RXRPC_ACK_DELAY;
		else
			atomic_inc_return(&call->ackr_nr_unacked);

		window++;
		if (after(window, wtop))
			wtop = window;

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);

		spin_lock(&call->recvmsg_queue.lock);
		rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
		*_notify = true;

		/* Drain any out-of-sequence packets that are now in order. */
		while ((oos = skb_peek(&call->rx_oos_queue))) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, window))
				break;

			__skb_unlink(oos, &call->rx_oos_queue);
			last = osp->hdr.flags & RXRPC_LAST_PACKET;
			seq = osp->hdr.seq;
			if (!reset_sack) {
				reset_from = seq;
				reset_sack = true;
			}

			window++;
			rxrpc_input_queue_data(call, oos, window, wtop,
					       rxrpc_receive_queue_oos);
		}

		spin_unlock(&call->recvmsg_queue.lock);

		/* Clear the SACK table slots for everything just drained. */
		if (reset_sack) {
			do {
				call->ackr_sack_table[reset_from % RXRPC_SACK_SIZE] = 0;
			} while (reset_from++, before(reset_from, window));
		}
	} else {
		/* Out of sequence: file on the o-o-s queue if not a dup. */
		bool keep = false;

		ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;

		if (!call->ackr_sack_table[seq % RXRPC_SACK_SIZE]) {
			call->ackr_sack_table[seq % RXRPC_SACK_SIZE] = 1;
			keep = 1;
		}

		if (after(seq + 1, wtop)) {
			wtop = seq + 1;
			rxrpc_input_update_ack_window(call, window, wtop);
		}

		if (!keep) {
			ack_reason = RXRPC_ACK_DUPLICATE;
			goto send_ack;
		}

		/* Insert in sequence order on the o-o-s queue. */
		skb_queue_walk(&call->rx_oos_queue, oos) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, seq)) {
				rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
				__skb_queue_before(&call->rx_oos_queue, oos, skb);
				goto oos_queued;
			}
		}

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
		__skb_queue_tail(&call->rx_oos_queue, skb);
	oos_queued:
		trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
				    sp->hdr.serial, sp->hdr.seq);
	}

send_ack:
	if (ack_reason >= 0)
		rxrpc_send_ACK(call, ack_reason, serial,
			       rxrpc_propose_ack_input_data);
	else
		rxrpc_propose_delay_ACK(call, serial,
					rxrpc_propose_ack_input_data);
}
490 | ||
/*
 * Split a jumbo packet and file the bits separately.
 *
 * Each subpacket is cloned out of @skb and fed to rxrpc_input_data_one();
 * the final (or only) segment is filed using @skb itself.  Notifies the
 * socket once at the end if any segment asked for it.  Returns false on a
 * malformed jumbo packet or clone failure.
 */
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
	struct sk_buff *jskb;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - offset;
	bool notify = false;

	while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
		if (len < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;	/* Truncated subpacket. */
		if (sp->hdr.flags & RXRPC_LAST_PACKET)
			goto protocol_error;	/* LAST can't also be JUMBO. */
		if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
				  &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;

		jskb = skb_clone(skb, GFP_NOFS);
		if (!jskb) {
			kdebug("couldn't clone");
			return false;
		}
		rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket);
		jsp = rxrpc_skb(jskb);
		jsp->offset = offset;
		jsp->len = RXRPC_JUMBO_DATALEN;
		rxrpc_input_data_one(call, jskb, &notify);
		rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);

		/* Step sp->hdr on to describe the next subpacket. */
		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = ntohs(jhdr._rsvd);
		sp->hdr.seq++;
		sp->hdr.serial++;
		offset += RXRPC_JUMBO_SUBPKTLEN;
		len -= RXRPC_JUMBO_SUBPKTLEN;
	}

	/* File the trailing (or sole) segment from the original skb. */
	sp->offset = offset;
	sp->len = len;
	rxrpc_input_data_one(call, skb, &notify);
	if (notify) {
		trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
		rxrpc_notify_socket(call);
	}
	return true;

protocol_error:
	return false;
}
17926a79 | 544 | |
248f219c | 545 | /* |
4858e403 DH |
546 | * Process a DATA packet, adding the packet to the Rx ring. The caller's |
547 | * packet ref must be passed on or discarded. | |
248f219c | 548 | */ |
e8c3af6b | 549 | static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) |
248f219c DH |
550 | { |
551 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | |
146d8fef | 552 | enum rxrpc_call_state state; |
d4d02d8b DH |
553 | rxrpc_serial_t serial = sp->hdr.serial; |
554 | rxrpc_seq_t seq0 = sp->hdr.seq; | |
17926a79 | 555 | |
5d7edbc9 DH |
556 | _enter("{%llx,%x},{%u,%x}", |
557 | atomic64_read(&call->ackr_window), call->rx_highest_seq, | |
558 | skb->len, seq0); | |
17926a79 | 559 | |
146d8fef | 560 | state = READ_ONCE(call->state); |
2d1faf7a | 561 | if (state >= RXRPC_CALL_COMPLETE) |
248f219c | 562 | return; |
17926a79 | 563 | |
a95d25dd | 564 | if (state == RXRPC_CALL_SERVER_RECV_REQUEST) { |
a158bdd3 DH |
565 | unsigned long timo = READ_ONCE(call->next_req_timo); |
566 | unsigned long now, expect_req_by; | |
567 | ||
568 | if (timo) { | |
569 | now = jiffies; | |
570 | expect_req_by = now + timo; | |
571 | WRITE_ONCE(call->expect_req_by, expect_req_by); | |
572 | rxrpc_reduce_call_timer(call, expect_req_by, now, | |
573 | rxrpc_timer_set_for_idle); | |
574 | } | |
575 | } | |
576 | ||
248f219c DH |
577 | /* Received data implicitly ACKs all of the request packets we sent |
578 | * when we're acting as a client. | |
579 | */ | |
146d8fef DH |
580 | if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || |
581 | state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && | |
70790dbe | 582 | !rxrpc_receiving_reply(call)) |
5e6ef4f1 | 583 | goto out_notify; |
72f0c6fb | 584 | |
d4d02d8b DH |
585 | if (!rxrpc_input_split_jumbo(call, skb)) { |
586 | rxrpc_proto_abort("VLD", call, sp->hdr.seq); | |
5e6ef4f1 | 587 | goto out_notify; |
248f219c | 588 | } |
d4d02d8b | 589 | skb = NULL; |
17926a79 | 590 | |
5e6ef4f1 | 591 | out_notify: |
f71dbf2f DH |
592 | trace_rxrpc_notify_socket(call->debug_id, serial); |
593 | rxrpc_notify_socket(call); | |
248f219c | 594 | _leave(" [queued]"); |
17926a79 DH |
595 | } |
596 | ||
50235c4b | 597 | /* |
4700c4d8 | 598 | * See if there's a cached RTT probe to complete. |
50235c4b | 599 | */ |
4700c4d8 DH |
600 | static void rxrpc_complete_rtt_probe(struct rxrpc_call *call, |
601 | ktime_t resp_time, | |
602 | rxrpc_serial_t acked_serial, | |
603 | rxrpc_serial_t ack_serial, | |
604 | enum rxrpc_rtt_rx_trace type) | |
50235c4b | 605 | { |
4700c4d8 DH |
606 | rxrpc_serial_t orig_serial; |
607 | unsigned long avail; | |
50235c4b | 608 | ktime_t sent_at; |
4700c4d8 DH |
609 | bool matched = false; |
610 | int i; | |
50235c4b | 611 | |
4700c4d8 DH |
612 | avail = READ_ONCE(call->rtt_avail); |
613 | smp_rmb(); /* Read avail bits before accessing data. */ | |
50235c4b | 614 | |
4700c4d8 DH |
615 | for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) { |
616 | if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail)) | |
50235c4b | 617 | continue; |
b604dd98 | 618 | |
4700c4d8 DH |
619 | sent_at = call->rtt_sent_at[i]; |
620 | orig_serial = call->rtt_serial[i]; | |
621 | ||
622 | if (orig_serial == acked_serial) { | |
623 | clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); | |
624 | smp_mb(); /* Read data before setting avail bit */ | |
625 | set_bit(i, &call->rtt_avail); | |
626 | if (type != rxrpc_rtt_rx_cancel) | |
627 | rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial, | |
628 | sent_at, resp_time); | |
629 | else | |
630 | trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i, | |
631 | orig_serial, acked_serial, 0, 0); | |
632 | matched = true; | |
633 | } | |
634 | ||
635 | /* If a later serial is being acked, then mark this slot as | |
636 | * being available. | |
637 | */ | |
638 | if (after(acked_serial, orig_serial)) { | |
639 | trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i, | |
640 | orig_serial, acked_serial, 0, 0); | |
641 | clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail); | |
642 | smp_wmb(); | |
643 | set_bit(i, &call->rtt_avail); | |
644 | } | |
645 | } | |
50235c4b | 646 | |
4700c4d8 DH |
647 | if (!matched) |
648 | trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0); | |
50235c4b DH |
649 | } |
650 | ||
17926a79 | 651 | /* |
248f219c | 652 | * Process the extra information that may be appended to an ACK packet |
17926a79 | 653 | */ |
248f219c DH |
654 | static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, |
655 | struct rxrpc_ackinfo *ackinfo) | |
17926a79 | 656 | { |
248f219c DH |
657 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
658 | struct rxrpc_peer *peer; | |
659 | unsigned int mtu; | |
702f2ac8 | 660 | bool wake = false; |
01fd0742 | 661 | u32 rwind = ntohl(ackinfo->rwind); |
248f219c | 662 | |
a4ea4c47 DH |
663 | if (rwind > RXRPC_TX_MAX_WINDOW) |
664 | rwind = RXRPC_TX_MAX_WINDOW; | |
702f2ac8 | 665 | if (call->tx_winsize != rwind) { |
702f2ac8 DH |
666 | if (rwind > call->tx_winsize) |
667 | wake = true; | |
a2ad7c21 | 668 | trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); |
702f2ac8 DH |
669 | call->tx_winsize = rwind; |
670 | } | |
671 | ||
08511150 DH |
672 | if (call->cong_ssthresh > rwind) |
673 | call->cong_ssthresh = rwind; | |
248f219c DH |
674 | |
675 | mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU)); | |
676 | ||
677 | peer = call->peer; | |
678 | if (mtu < peer->maxdata) { | |
3dd9c8b5 | 679 | spin_lock(&peer->lock); |
248f219c DH |
680 | peer->maxdata = mtu; |
681 | peer->mtu = mtu + peer->hdrsize; | |
3dd9c8b5 | 682 | spin_unlock(&peer->lock); |
248f219c | 683 | } |
702f2ac8 DH |
684 | |
685 | if (wake) | |
686 | wake_up(&call->waitq); | |
248f219c | 687 | } |
17926a79 | 688 | |
248f219c DH |
689 | /* |
690 | * Process individual soft ACKs. | |
691 | * | |
692 | * Each ACK in the array corresponds to one packet and can be either an ACK or | |
693 | * a NAK. If we get find an explicitly NAK'd packet we resend immediately; | |
694 | * packets that lie beyond the end of the ACK list are scheduled for resend by | |
695 | * the timer on the basis that the peer might just not have processed them at | |
696 | * the time the ACK was sent. | |
697 | */ | |
698 | static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks, | |
31a1b989 DH |
699 | rxrpc_seq_t seq, int nr_acks, |
700 | struct rxrpc_ack_summary *summary) | |
248f219c | 701 | { |
d57a3a15 | 702 | unsigned int i; |
a4ea4c47 | 703 | |
d57a3a15 DH |
704 | for (i = 0; i < nr_acks; i++) { |
705 | if (acks[i] == RXRPC_ACK_TYPE_ACK) { | |
31a1b989 | 706 | summary->nr_acks++; |
31a1b989 | 707 | summary->nr_new_acks++; |
d57a3a15 DH |
708 | } else { |
709 | if (!summary->saw_nacks && | |
710 | call->acks_lowest_nak != seq + i) { | |
711 | call->acks_lowest_nak = seq + i; | |
31a1b989 DH |
712 | summary->new_low_nack = true; |
713 | } | |
d57a3a15 | 714 | summary->saw_nacks = true; |
17926a79 | 715 | } |
17926a79 DH |
716 | } |
717 | } | |
718 | ||
441fdee1 DH |
719 | /* |
720 | * Return true if the ACK is valid - ie. it doesn't appear to have regressed | |
721 | * with respect to the ack state conveyed by preceding ACKs. | |
722 | */ | |
723 | static bool rxrpc_is_ack_valid(struct rxrpc_call *call, | |
724 | rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) | |
725 | { | |
8940ba3c | 726 | rxrpc_seq_t base = READ_ONCE(call->acks_first_seq); |
441fdee1 DH |
727 | |
728 | if (after(first_pkt, base)) | |
729 | return true; /* The window advanced */ | |
730 | ||
731 | if (before(first_pkt, base)) | |
732 | return false; /* firstPacket regressed */ | |
733 | ||
8940ba3c | 734 | if (after_eq(prev_pkt, call->acks_prev_seq)) |
441fdee1 DH |
735 | return true; /* previousPacket hasn't regressed. */ |
736 | ||
737 | /* Some rx implementations put a serial number in previousPacket. */ | |
738 | if (after_eq(prev_pkt, base + call->tx_winsize)) | |
739 | return false; | |
740 | return true; | |
741 | } | |
742 | ||
17926a79 | 743 | /* |
248f219c DH |
744 | * Process an ACK packet. |
745 | * | |
746 | * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet | |
747 | * in the ACK array. Anything before that is hard-ACK'd and may be discarded. | |
748 | * | |
749 | * A hard-ACK means that a packet has been processed and may be discarded; a | |
750 | * soft-ACK means that the packet may be discarded and retransmission | |
751 | * requested. A phase is complete when all packets are hard-ACK'd. | |
17926a79 | 752 | */ |
e8c3af6b | 753 | static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) |
17926a79 | 754 | { |
31a1b989 | 755 | struct rxrpc_ack_summary summary = { 0 }; |
d57a3a15 | 756 | struct rxrpc_ackpacket ack; |
17926a79 | 757 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
d57a3a15 | 758 | struct rxrpc_ackinfo info; |
68528d93 | 759 | rxrpc_serial_t ack_serial, acked_serial; |
1a2391c3 | 760 | rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt; |
775e5b71 | 761 | int nr_acks, offset, ioffset; |
248f219c DH |
762 | |
763 | _enter(""); | |
764 | ||
775e5b71 | 765 | offset = sizeof(struct rxrpc_wire_header); |
5e6ef4f1 DH |
766 | if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0) |
767 | return rxrpc_proto_abort("XAK", call, 0); | |
d57a3a15 | 768 | offset += sizeof(ack); |
248f219c | 769 | |
68528d93 | 770 | ack_serial = sp->hdr.serial; |
d57a3a15 DH |
771 | acked_serial = ntohl(ack.serial); |
772 | first_soft_ack = ntohl(ack.firstPacket); | |
773 | prev_pkt = ntohl(ack.previousPacket); | |
248f219c | 774 | hard_ack = first_soft_ack - 1; |
d57a3a15 DH |
775 | nr_acks = ack.nAcks; |
776 | summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ? | |
777 | ack.reason : RXRPC_ACK__INVALID); | |
248f219c | 778 | |
68528d93 | 779 | trace_rxrpc_rx_ack(call, ack_serial, acked_serial, |
1a2391c3 | 780 | first_soft_ack, prev_pkt, |
b1d9f7fd | 781 | summary.ack_reason, nr_acks); |
d57a3a15 | 782 | rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]); |
ec71eb9a | 783 | |
d57a3a15 | 784 | switch (ack.reason) { |
4700c4d8 | 785 | case RXRPC_ACK_PING_RESPONSE: |
4700c4d8 DH |
786 | rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, |
787 | rxrpc_rtt_rx_ping_response); | |
788 | break; | |
789 | case RXRPC_ACK_REQUESTED: | |
790 | rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, | |
791 | rxrpc_rtt_rx_requested_ack); | |
792 | break; | |
793 | default: | |
794 | if (acked_serial != 0) | |
795 | rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial, | |
796 | rxrpc_rtt_rx_cancel); | |
797 | break; | |
798 | } | |
8e83134d | 799 | |
d57a3a15 | 800 | if (ack.reason == RXRPC_ACK_PING) { |
72f0c6fb DH |
801 | rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial, |
802 | rxrpc_propose_ack_respond_to_ping); | |
248f219c | 803 | } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { |
72f0c6fb DH |
804 | rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial, |
805 | rxrpc_propose_ack_respond_to_ack); | |
17926a79 DH |
806 | } |
807 | ||
adc9613f DH |
808 | /* If we get an EXCEEDS_WINDOW ACK from the server, it probably |
809 | * indicates that the client address changed due to NAT. The server | |
810 | * lost the call because it switched to a different peer. | |
811 | */ | |
d57a3a15 | 812 | if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) && |
adc9613f DH |
813 | first_soft_ack == 1 && |
814 | prev_pkt == 0 && | |
815 | rxrpc_is_client_call(call)) { | |
816 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, | |
817 | 0, -ENETRESET); | |
5e6ef4f1 | 818 | return; |
adc9613f DH |
819 | } |
820 | ||
821 | /* If we get an OUT_OF_SEQUENCE ACK from the server, that can also | |
822 | * indicate a change of address. However, we can retransmit the call | |
823 | * if we still have it buffered to the beginning. | |
824 | */ | |
d57a3a15 | 825 | if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) && |
adc9613f DH |
826 | first_soft_ack == 1 && |
827 | prev_pkt == 0 && | |
a4ea4c47 | 828 | call->acks_hard_ack == 0 && |
adc9613f DH |
829 | rxrpc_is_client_call(call)) { |
830 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, | |
831 | 0, -ENETRESET); | |
5e6ef4f1 | 832 | return; |
adc9613f DH |
833 | } |
834 | ||
1a2391c3 | 835 | /* Discard any out-of-order or duplicate ACKs (outside lock). */ |
441fdee1 | 836 | if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { |
68528d93 | 837 | trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial, |
8940ba3c DH |
838 | first_soft_ack, call->acks_first_seq, |
839 | prev_pkt, call->acks_prev_seq); | |
5e6ef4f1 | 840 | return; |
d1f12947 | 841 | } |
c1e15b49 | 842 | |
d57a3a15 | 843 | info.rxMTU = 0; |
775e5b71 | 844 | ioffset = offset + nr_acks + 3; |
d57a3a15 | 845 | if (skb->len >= ioffset + sizeof(info) && |
5e6ef4f1 DH |
846 | skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0) |
847 | return rxrpc_proto_abort("XAI", call, 0); | |
d57a3a15 DH |
848 | |
849 | if (nr_acks > 0) | |
850 | skb_condense(skb); | |
c1e15b49 | 851 | |
298bc15b | 852 | call->acks_latest_ts = skb->tstamp; |
8940ba3c DH |
853 | call->acks_first_seq = first_soft_ack; |
854 | call->acks_prev_seq = prev_pkt; | |
1a2391c3 | 855 | |
d57a3a15 DH |
856 | switch (ack.reason) { |
857 | case RXRPC_ACK_PING: | |
858 | break; | |
d57a3a15 DH |
859 | default: |
860 | if (after(acked_serial, call->acks_highest_serial)) | |
861 | call->acks_highest_serial = acked_serial; | |
862 | break; | |
863 | } | |
589a0c1e | 864 | |
298bc15b | 865 | /* Parse rwind and mtu sizes if provided. */ |
d57a3a15 DH |
866 | if (info.rxMTU) |
867 | rxrpc_input_ackinfo(call, skb, &info); | |
17926a79 | 868 | |
5e6ef4f1 DH |
869 | if (first_soft_ack == 0) |
870 | return rxrpc_proto_abort("AK0", call, 0); | |
17926a79 | 871 | |
248f219c | 872 | /* Ignore ACKs unless we are or have just been transmitting. */ |
146d8fef | 873 | switch (READ_ONCE(call->state)) { |
248f219c DH |
874 | case RXRPC_CALL_CLIENT_SEND_REQUEST: |
875 | case RXRPC_CALL_CLIENT_AWAIT_REPLY: | |
876 | case RXRPC_CALL_SERVER_SEND_REPLY: | |
877 | case RXRPC_CALL_SERVER_AWAIT_ACK: | |
878 | break; | |
17926a79 | 879 | default: |
5e6ef4f1 | 880 | return; |
248f219c | 881 | } |
17926a79 | 882 | |
a4ea4c47 | 883 | if (before(hard_ack, call->acks_hard_ack) || |
5e6ef4f1 DH |
884 | after(hard_ack, call->tx_top)) |
885 | return rxrpc_proto_abort("AKW", call, 0); | |
886 | if (nr_acks > call->tx_top - hard_ack) | |
887 | return rxrpc_proto_abort("AKN", call, 0); | |
17926a79 | 888 | |
a4ea4c47 | 889 | if (after(hard_ack, call->acks_hard_ack)) { |
c479d5f2 DH |
890 | if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { |
891 | rxrpc_end_tx_phase(call, false, "ETA"); | |
5e6ef4f1 | 892 | return; |
c479d5f2 DH |
893 | } |
894 | } | |
17926a79 | 895 | |
70790dbe | 896 | if (nr_acks > 0) { |
5e6ef4f1 DH |
897 | if (offset > (int)skb->len - nr_acks) |
898 | return rxrpc_proto_abort("XSA", call, 0); | |
d57a3a15 DH |
899 | rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack, |
900 | nr_acks, &summary); | |
70790dbe DH |
901 | } |
902 | ||
a4ea4c47 | 903 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) && |
a9f312d9 DH |
904 | summary.nr_acks == call->tx_top - hard_ack && |
905 | rxrpc_is_client_call(call)) | |
72f0c6fb DH |
906 | rxrpc_propose_ping(call, ack_serial, |
907 | rxrpc_propose_ack_ping_for_lost_reply); | |
57494343 | 908 | |
c1e15b49 | 909 | rxrpc_congestion_management(call, skb, &summary, acked_serial); |
17926a79 DH |
910 | } |
911 | ||
912 | /* | |
248f219c | 913 | * Process an ACKALL packet. |
17926a79 | 914 | */ |
248f219c | 915 | static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) |
17926a79 | 916 | { |
31a1b989 | 917 | struct rxrpc_ack_summary summary = { 0 }; |
17926a79 | 918 | |
c479d5f2 | 919 | if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) |
70790dbe | 920 | rxrpc_end_tx_phase(call, false, "ETL"); |
248f219c | 921 | } |
17926a79 | 922 | |
248f219c | 923 | /* |
005ede28 | 924 | * Process an ABORT packet directed at a call. |
248f219c DH |
925 | */ |
926 | static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) | |
927 | { | |
928 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | |
17926a79 | 929 | |
f14febd8 | 930 | trace_rxrpc_rx_abort(call, sp->hdr.serial, skb->priority); |
005ede28 | 931 | |
5ac0d622 | 932 | rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, |
f14febd8 | 933 | skb->priority, -ECONNABORTED); |
17926a79 DH |
934 | } |
935 | ||
936 | /* | |
248f219c | 937 | * Process an incoming call packet. |
17926a79 | 938 | */ |
5e6ef4f1 | 939 | void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb) |
17926a79 | 940 | { |
248f219c | 941 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
a158bdd3 | 942 | unsigned long timo; |
17926a79 | 943 | |
7727640c | 944 | _enter("%p,%p", call, skb); |
17926a79 | 945 | |
5e6ef4f1 DH |
946 | if (sp->hdr.serviceId != call->dest_srx.srx_service) |
947 | call->dest_srx.srx_service = sp->hdr.serviceId; | |
948 | if ((int)sp->hdr.serial - (int)call->rx_serial > 0) | |
949 | call->rx_serial = sp->hdr.serial; | |
950 | if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags)) | |
951 | set_bit(RXRPC_CALL_RX_HEARD, &call->flags); | |
952 | ||
a158bdd3 DH |
953 | timo = READ_ONCE(call->next_rx_timo); |
954 | if (timo) { | |
955 | unsigned long now = jiffies, expect_rx_by; | |
956 | ||
c54e43d7 | 957 | expect_rx_by = now + timo; |
a158bdd3 DH |
958 | WRITE_ONCE(call->expect_rx_by, expect_rx_by); |
959 | rxrpc_reduce_call_timer(call, expect_rx_by, now, | |
960 | rxrpc_timer_set_for_normal); | |
961 | } | |
3d7682af | 962 | |
248f219c DH |
963 | switch (sp->hdr.type) { |
964 | case RXRPC_PACKET_TYPE_DATA: | |
e8c3af6b | 965 | rxrpc_input_data(call, skb); |
2d1faf7a | 966 | break; |
f5c17aae | 967 | |
248f219c | 968 | case RXRPC_PACKET_TYPE_ACK: |
e8c3af6b | 969 | rxrpc_input_ack(call, skb); |
2d1faf7a | 970 | break; |
17926a79 | 971 | |
248f219c | 972 | case RXRPC_PACKET_TYPE_BUSY: |
248f219c DH |
973 | /* Just ignore BUSY packets from the server; the retry and |
974 | * lifespan timers will take care of business. BUSY packets | |
975 | * from the client don't make sense. | |
976 | */ | |
977 | break; | |
17926a79 | 978 | |
248f219c DH |
979 | case RXRPC_PACKET_TYPE_ABORT: |
980 | rxrpc_input_abort(call, skb); | |
981 | break; | |
17926a79 | 982 | |
248f219c DH |
983 | case RXRPC_PACKET_TYPE_ACKALL: |
984 | rxrpc_input_ackall(call, skb); | |
985 | break; | |
f5c17aae | 986 | |
248f219c | 987 | default: |
248f219c | 988 | break; |
17926a79 | 989 | } |
17926a79 DH |
990 | } |
991 | ||
b3156274 | 992 | /* |
c1e15b49 DH |
993 | * Handle a new service call on a channel implicitly completing the preceding |
994 | * call on that channel. This does not apply to client conns. | |
b3156274 DH |
995 | * |
996 | * TODO: If callNumber > call_id + 1, renegotiate security. | |
997 | */ | |
5e6ef4f1 | 998 | void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb) |
b3156274 | 999 | { |
146d8fef | 1000 | switch (READ_ONCE(call->state)) { |
b3156274 DH |
1001 | case RXRPC_CALL_SERVER_AWAIT_ACK: |
1002 | rxrpc_call_completed(call); | |
df561f66 | 1003 | fallthrough; |
b3156274 DH |
1004 | case RXRPC_CALL_COMPLETE: |
1005 | break; | |
1006 | default: | |
a343b174 | 1007 | rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN); |
c1e15b49 | 1008 | trace_rxrpc_improper_term(call); |
b3156274 DH |
1009 | break; |
1010 | } | |
1011 | ||
5e6ef4f1 | 1012 | rxrpc_input_call_event(call, skb); |
b3156274 | 1013 | } |