Commit | Line | Data |
---|---|---|
17926a79 DH |
1 | /* RxRPC packet reception |
2 | * | |
248f219c | 3 | * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved. |
17926a79 DH |
4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | ||
9b6d5398 JP |
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
13 | ||
17926a79 DH |
14 | #include <linux/module.h> |
15 | #include <linux/net.h> | |
16 | #include <linux/skbuff.h> | |
17 | #include <linux/errqueue.h> | |
18 | #include <linux/udp.h> | |
19 | #include <linux/in.h> | |
20 | #include <linux/in6.h> | |
21 | #include <linux/icmp.h> | |
5a0e3ad6 | 22 | #include <linux/gfp.h> |
17926a79 DH |
23 | #include <net/sock.h> |
24 | #include <net/af_rxrpc.h> | |
25 | #include <net/ip.h> | |
1781f7f5 | 26 | #include <net/udp.h> |
0283328e | 27 | #include <net/net_namespace.h> |
17926a79 DH |
28 | #include "ar-internal.h" |
29 | ||
248f219c DH |
30 | static void rxrpc_proto_abort(const char *why, |
31 | struct rxrpc_call *call, rxrpc_seq_t seq) | |
32 | { | |
33 | if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) { | |
34 | set_bit(RXRPC_CALL_EV_ABORT, &call->events); | |
35 | rxrpc_queue_call(call); | |
36 | } | |
37 | } | |
38 | ||
57494343 DH |
/*
 * Do TCP-style congestion management [RFC 5681].
 *
 * Runs once per received ACK on a transmitting call.  Updates the congestion
 * window (cong_cwnd), slow-start threshold (cong_ssthresh) and mode on the
 * call, records the decision in *summary for tracing and may flag the call
 * for resend.  acked_serial is only used for the trace record.
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	/* Packets still in flight = unacked Tx window minus soft-ACKs. */
	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	/* A retransmission timeout collapses the window to 1 and halves the
	 * threshold, per RFC 5681 s3.1.
	 */
	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	/* Accumulate ACKs seen this window, saturating at 255. */
	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	/* Snapshot the pre-decision state for the trace point. */
	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		/* Grow the window per ACK-bearing packet until we hit the
		 * slow-start threshold, then switch to congestion avoidance.
		 */
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		/* A new lowest NAK restarts the duplicate-ACK count. */
		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		/* Three dup ACKs: begin fast retransmit (RFC 5681 s3.2). */
		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			/* Progress: deflate the window back to ssthresh. */
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	/* All NAKs cleared: leave the loss-recovery modes. */
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	/* A congestion event ends the accumulation window; paths that jump
	 * to out_no_clear_ca keep the running count instead.
	 */
	cumulative_acks = 0;
out_no_clear_ca:
	/* Never let the window exceed the Tx ring capacity. */
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
198 | ||
8e83134d DH |
199 | /* |
200 | * Ping the other end to fill our RTT cache and to retrieve the rwind | |
201 | * and MTU parameters. | |
202 | */ | |
203 | static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, | |
204 | int skew) | |
205 | { | |
206 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | |
fc943f67 | 207 | ktime_t now = skb->tstamp; |
8e83134d | 208 | |
fc943f67 DH |
209 | if (call->peer->rtt_usage < 3 || |
210 | ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) | |
211 | rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, | |
9c7ad434 DH |
212 | true, true, |
213 | rxrpc_propose_ack_ping_for_params); | |
8e83134d DH |
214 | } |
215 | ||
/*
 * Apply a hard ACK by advancing the Tx window.
 *
 * Everything up to and including sequence number 'to' is detached from the
 * Tx ring and freed; *summary is updated with the rotation results for the
 * caller's congestion accounting.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	int ix;
	u8 annotation;

	/* Track the lowest NAK'd sequence number; if the hard-ACK point
	 * sweeps past the previously recorded low NAK, note a new low NAK
	 * for the congestion machinery.
	 */
	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	/* Pull each newly hard-ACK'd packet out of the ring, chaining them
	 * on a local list so they can be freed after the lock is dropped.
	 */
	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST)
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
		/* Packets not already soft-ACK'd count as newly ACK'd by
		 * this rotation.
		 */
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	/* Wake anyone waiting for Tx window space. */
	wake_up(&call->waitq);

	/* Free the rotated-out packets outside of the call lock. */
	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}
}
17926a79 | 266 | |
248f219c DH |
/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 *
 * Returns true on success; on an unexpected call state the call is aborted
 * with a protocol error (using abort_why as the tag) and false is returned.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{

	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* A client call moves straight to reply reception if the
		 * reply has already begun to arrive; otherwise it waits.
		 */
		if (reply_begun)
			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		/* The final ACK completes a server call entirely. */
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
		/* Propose an IDLE ACK to let the peer know we've sent all of
		 * our request data.
		 */
		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
				  rxrpc_propose_ack_client_tx_end);
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	} else {
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	}
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
316 | ||
/*
 * Begin the reply reception phase of a call.
 *
 * Rotates away the entire Tx window (the reply implicitly hard-ACKs all of
 * our request data), ends the Tx phase and clears tx_phase.  Returns false
 * if the call had to be aborted.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	/* Cancel any pending ACK proposal and push the resend and ACK timers
	 * out to the call expiry time, since we're receiving now rather than
	 * transmitting.
	 */
	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		call->resend_at = call->expire_at;
		call->ack_at = call->expire_at;
		spin_unlock_bh(&call->lock);
		rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_rotate_tx_window(call, top, &summary);
	/* The rotation should have swept up the last Tx packet; if not, the
	 * Tx window state is inconsistent and the call must be aborted.
	 */
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_proto_abort("TXL", call, top);
		return false;
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}
345 | ||
346 | /* | |
347 | * Scan a jumbo packet to validate its structure and to work out how many | |
348 | * subpackets it contains. | |
349 | * | |
350 | * A jumbo packet is a collection of consecutive packets glued together with | |
351 | * little headers between that indicate how to change the initial header for | |
352 | * each subpacket. | |
353 | * | |
354 | * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but | |
355 | * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any | |
356 | * size. | |
357 | */ | |
358 | static bool rxrpc_validate_jumbo(struct sk_buff *skb) | |
359 | { | |
360 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | |
361 | unsigned int offset = sp->offset; | |
89a80ed4 | 362 | unsigned int len = skb->len; |
248f219c DH |
363 | int nr_jumbo = 1; |
364 | u8 flags = sp->hdr.flags; | |
365 | ||
366 | do { | |
367 | nr_jumbo++; | |
368 | if (len - offset < RXRPC_JUMBO_SUBPKTLEN) | |
369 | goto protocol_error; | |
370 | if (flags & RXRPC_LAST_PACKET) | |
371 | goto protocol_error; | |
372 | offset += RXRPC_JUMBO_DATALEN; | |
373 | if (skb_copy_bits(skb, offset, &flags, 1) < 0) | |
374 | goto protocol_error; | |
375 | offset += sizeof(struct rxrpc_jumbo_header); | |
376 | } while (flags & RXRPC_JUMBO_PACKET); | |
377 | ||
378 | sp->nr_jumbo = nr_jumbo; | |
379 | return true; | |
17926a79 | 380 | |
248f219c DH |
381 | protocol_error: |
382 | return false; | |
17926a79 DH |
383 | } |
384 | ||
385 | /* | |
248f219c DH |
386 | * Handle reception of a duplicate packet. |
387 | * | |
388 | * We have to take care to avoid an attack here whereby we're given a series of | |
389 | * jumbograms, each with a sequence number one before the preceding one and | |
390 | * filled up to maximum UDP size. If they never send us the first packet in | |
391 | * the sequence, they can cause us to have to hold on to around 2MiB of kernel | |
392 | * space until the call times out. | |
393 | * | |
394 | * We limit the space usage by only accepting three duplicate jumbo packets per | |
395 | * call. After that, we tell the other side we're no longer accepting jumbos | |
396 | * (that information is encoded in the ACK packet). | |
17926a79 | 397 | */ |
248f219c | 398 | static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, |
75e42126 | 399 | u8 annotation, bool *_jumbo_bad) |
17926a79 | 400 | { |
248f219c DH |
401 | /* Discard normal packets that are duplicates. */ |
402 | if (annotation == 0) | |
403 | return; | |
17926a79 | 404 | |
248f219c DH |
405 | /* Skip jumbo subpackets that are duplicates. When we've had three or |
406 | * more partially duplicate jumbo packets, we refuse to take any more | |
407 | * jumbos for this call. | |
408 | */ | |
75e42126 DH |
409 | if (!*_jumbo_bad) { |
410 | call->nr_jumbo_bad++; | |
411 | *_jumbo_bad = true; | |
248f219c DH |
412 | } |
413 | } | |
17926a79 | 414 | |
248f219c DH |
/*
 * Process a DATA packet, adding the packet to the Rx ring.
 *
 * Walks every subpacket of a (possibly jumbo) DATA packet, inserting each
 * into the Rx ring slot derived from its sequence number, and proposes
 * whatever ACK the outcome requires.  skew is passed through to the ACK
 * proposal.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sp->offset;
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		return;

	call->ackr_prev_seq = seq;

	/* Reject packets beyond the Rx window outright. */
	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		/* Refuse further jumbos once the duplicate-jumbo quota is
		 * blown (see rxrpc_input_dup_data()).
		 */
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		/* Annotation carries the 1-based subpacket number. */
		annotation = 1;
	}

next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	/* The LAST flag must be consistent with any previously seen top of
	 * the Rx sequence space.
	 */
	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top)
			return rxrpc_proto_abort("LSN", call, seq);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top))
			return rxrpc_proto_abort("LSA", call, seq);
	}

	/* Already consumed: report a duplicate. */
	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	/* Slot already occupied: duplicate subpacket. */
	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}

	/* Queue the packet.  We use a couple of memory barriers here as need
	 * to make sure that rx_top is perceived to be set after the buffer
	 * pointer and that the buffer pointer is set after the annotation and
	 * the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	/* Note out-of-sequence arrivals and advance the expected sequence. */
	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		/* Read the next subpacket's flags from its jumbo header and
		 * advance to it.
		 */
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			return rxrpc_proto_abort("XJF", call, seq);
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		/* NOTE(review): JLAST is set while the JUMBO flag is still
		 * present on the next subpacket - confirm this matches what
		 * the recvmsg side expects of RXRPC_RX_ANNO_JLAST.
		 */
		if (flags & RXRPC_JUMBO_PACKET)
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	/* Queuing the final packet proposes a delayed ACK if nothing more
	 * urgent has been selected.
	 */
	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);

	/* Wake the socket if this packet is the next one userspace wants. */
	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
		rxrpc_notify_socket(call);
	_leave(" [queued]");
}
583 | ||
50235c4b DH |
584 | /* |
585 | * Process a requested ACK. | |
586 | */ | |
587 | static void rxrpc_input_requested_ack(struct rxrpc_call *call, | |
588 | ktime_t resp_time, | |
589 | rxrpc_serial_t orig_serial, | |
590 | rxrpc_serial_t ack_serial) | |
591 | { | |
592 | struct rxrpc_skb_priv *sp; | |
593 | struct sk_buff *skb; | |
594 | ktime_t sent_at; | |
595 | int ix; | |
596 | ||
597 | for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) { | |
598 | skb = call->rxtx_buffer[ix]; | |
599 | if (!skb) | |
600 | continue; | |
601 | ||
602 | sp = rxrpc_skb(skb); | |
603 | if (sp->hdr.serial != orig_serial) | |
604 | continue; | |
605 | smp_rmb(); | |
606 | sent_at = skb->tstamp; | |
607 | goto found; | |
608 | } | |
609 | return; | |
610 | ||
611 | found: | |
612 | rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack, | |
613 | orig_serial, ack_serial, sent_at, resp_time); | |
614 | } | |
615 | ||
8e83134d DH |
/*
 * Process a ping response.
 *
 * If the response matches the outstanding ping (by serial number), clear the
 * PINGING flag and feed the round-trip time into the peer's RTT cache.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

	/* Read the ping timestamp before the ping serial - NOTE(review):
	 * presumably pairs with a write barrier where the ping is recorded;
	 * confirm against the ping transmit path.
	 */
	ping_time = call->ackr_ping_time;
	smp_rmb();
	ping_serial = call->ackr_ping;

	/* Ignore if no ping is outstanding or the response predates it. */
	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
	    before(orig_serial, ping_serial))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);
	/* A response to a later serial clears the flag but yields no RTT
	 * sample for this ping.
	 */
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}
641 | ||
17926a79 | 642 | /* |
248f219c | 643 | * Process the extra information that may be appended to an ACK packet |
17926a79 | 644 | */ |
248f219c DH |
645 | static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, |
646 | struct rxrpc_ackinfo *ackinfo) | |
17926a79 | 647 | { |
248f219c DH |
648 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
649 | struct rxrpc_peer *peer; | |
650 | unsigned int mtu; | |
01fd0742 | 651 | u32 rwind = ntohl(ackinfo->rwind); |
248f219c DH |
652 | |
653 | _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }", | |
654 | sp->hdr.serial, | |
655 | ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), | |
01fd0742 | 656 | rwind, ntohl(ackinfo->jumbo_max)); |
248f219c | 657 | |
01fd0742 DH |
658 | if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) |
659 | rwind = RXRPC_RXTX_BUFF_SIZE - 1; | |
660 | call->tx_winsize = rwind; | |
08511150 DH |
661 | if (call->cong_ssthresh > rwind) |
662 | call->cong_ssthresh = rwind; | |
248f219c DH |
663 | |
664 | mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU)); | |
665 | ||
666 | peer = call->peer; | |
667 | if (mtu < peer->maxdata) { | |
668 | spin_lock_bh(&peer->lock); | |
669 | peer->maxdata = mtu; | |
670 | peer->mtu = mtu + peer->hdrsize; | |
671 | spin_unlock_bh(&peer->lock); | |
672 | _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata); | |
673 | } | |
674 | } | |
17926a79 | 675 | |
248f219c DH |
/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 *
 * seq is the sequence number of the first entry in acks[]; nr_acks entries
 * are consumed.  Tx ring annotations are updated and the counts in *summary
 * are maintained for the congestion machinery.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		/* Split the annotation into its type and the residual bits
		 * that must be preserved across the update.
		 */
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			/* The first NAK in the list establishes a new lowest
			 * NAK point if it moved.
			 */
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			/* Don't downgrade a packet already queued for
			 * retransmission.
			 */
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}
726 | ||
/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack;
	int nr_acks, offset;

	_enter("");

	/* Extract the fixed ACK header from the packet. */
	if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	sp->offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	/* Clamp the reason code so it can safely index rxrpc_ack_names[]. */
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);

	_proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
	       sp->hdr.serial,
	       ntohs(buf.ack.maxSkew),
	       first_soft_ack,
	       ntohl(buf.ack.previousPacket),
	       acked_serial,
	       rxrpc_ack_names[summary.ack_reason],
	       buf.ack.nAcks);

	/* Harvest RTT samples from ping responses and requested ACKs. */
	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	/* Answer pings immediately; otherwise honour a REQUEST_ACK flag. */
	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	/* The trailing ackinfo, if present, sits after the soft-ACK array
	 * padded to four bytes.
	 */
	offset = sp->offset + nr_acks + 3;
	if (skb->len >= offset + sizeof(buf.info)) {
		if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
			return rxrpc_proto_abort("XAI", call, 0);
		rxrpc_input_ackinfo(call, skb, &buf.info);
	}

	if (first_soft_ack == 0)
		return rxrpc_proto_abort("AK0", call, 0);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		return;
	}

	/* Discard any out-of-order or duplicate ACKs. */
	if (before_eq(sp->hdr.serial, call->acks_latest)) {
		_debug("discard ACK %d <= %d",
		       sp->hdr.serial, call->acks_latest);
		return;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	/* Sanity-check the claimed hard-ACK point and soft-ACK count against
	 * the Tx window before acting on them.
	 */
	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort("AKW", call, 0);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort("AKN", call, 0);

	if (after(hard_ack, call->tx_hard_ack))
		rxrpc_rotate_tx_window(call, hard_ack, &summary);

	if (nr_acks > 0) {
		if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
			return rxrpc_proto_abort("XSA", call, 0);
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_end_tx_phase(call, false, "ETA");
		return;
	}

	/* If everything outstanding (including the last packet) is soft-ACK'd
	 * but not hard-ACK'd, ping the peer in case its reply got lost.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack)
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	return rxrpc_congestion_management(call, skb, &summary, acked_serial);
}
855 | ||
856 | /* | |
248f219c | 857 | * Process an ACKALL packet. |
17926a79 | 858 | */ |
248f219c | 859 | static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb) |
17926a79 | 860 | { |
31a1b989 | 861 | struct rxrpc_ack_summary summary = { 0 }; |
248f219c | 862 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
17926a79 | 863 | |
248f219c | 864 | _proto("Rx ACKALL %%%u", sp->hdr.serial); |
17926a79 | 865 | |
31a1b989 | 866 | rxrpc_rotate_tx_window(call, call->tx_top, &summary); |
70790dbe DH |
867 | if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) |
868 | rxrpc_end_tx_phase(call, false, "ETL"); | |
248f219c | 869 | } |
17926a79 | 870 | |
248f219c DH |
871 | /* |
872 | * Process an ABORT packet. | |
873 | */ | |
874 | static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb) | |
875 | { | |
876 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | |
877 | __be32 wtmp; | |
878 | u32 abort_code = RX_CALL_DEAD; | |
17926a79 | 879 | |
248f219c | 880 | _enter(""); |
17926a79 | 881 | |
248f219c DH |
882 | if (skb->len >= 4 && |
883 | skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0) | |
884 | abort_code = ntohl(wtmp); | |
17926a79 | 885 | |
248f219c | 886 | _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code); |
17926a79 | 887 | |
248f219c DH |
888 | if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, |
889 | abort_code, ECONNABORTED)) | |
890 | rxrpc_notify_socket(call); | |
17926a79 DH |
891 | } |
892 | ||
/*
 * Process an incoming call packet.
 *
 * Dispatches a packet that has already been routed to a specific call to the
 * handler for its packet type.  @skew is the serial-number skew computed
 * against the connection's highest serial seen (passed through to the DATA
 * and ACK handlers).
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("%p,%p", call, skb);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		/* Anything else is just logged and dropped.  The caller
		 * (rxrpc_data_ready) has already rejected types outside
		 * RXRPC_SUPPORTED_PACKET_TYPES, so this arm handles types
		 * that are supported file-wide but not call-bound.
		 */
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
		break;
	}

	_leave("");
}
936 | ||
/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 *
 * Ownership of @skb passes to the connection's rx_queue; the connection
 * event processor consumes it asynchronously (NOTE(review): freeing is
 * presumed to happen in the conn event work item — confirm in conn_event.c).
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	/* Queue first, then kick the connection's work item to process it. */
	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}
950 | ||
/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 *
 * Ownership of @skb passes to the local endpoint's event_queue; the local
 * processor work item handles it asynchronously.
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	/* Queue first, then kick the local endpoint's work item. */
	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}
963 | ||
/*
 * put a packet up for transport-level abort
 *
 * Ownership of @skb passes to the local endpoint's reject_queue; the reply
 * (abort or similar, per skb->mark/skb->priority set by the caller) is sent
 * asynchronously by the local processor.
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	/* Debugging aid: trip if the local endpoint's refcount memory looks
	 * freed/poisoned when this is called.
	 */
	CHECK_SLAB_OKAY(&local->usage);

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_local(local);
}
974 | ||
0d12f8a4 DH |
975 | /* |
976 | * Extract the wire header from a packet and translate the byte order. | |
977 | */ | |
978 | static noinline | |
979 | int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) | |
980 | { | |
981 | struct rxrpc_wire_header whdr; | |
982 | ||
983 | /* dig out the RxRPC connection details */ | |
4d0fc73e | 984 | if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) |
0d12f8a4 | 985 | return -EBADMSG; |
0d12f8a4 DH |
986 | |
987 | memset(sp, 0, sizeof(*sp)); | |
988 | sp->hdr.epoch = ntohl(whdr.epoch); | |
989 | sp->hdr.cid = ntohl(whdr.cid); | |
990 | sp->hdr.callNumber = ntohl(whdr.callNumber); | |
991 | sp->hdr.seq = ntohl(whdr.seq); | |
992 | sp->hdr.serial = ntohl(whdr.serial); | |
993 | sp->hdr.flags = whdr.flags; | |
994 | sp->hdr.type = whdr.type; | |
995 | sp->hdr.userStatus = whdr.userStatus; | |
996 | sp->hdr.securityIndex = whdr.securityIndex; | |
997 | sp->hdr._rsvd = ntohs(whdr._rsvd); | |
998 | sp->hdr.serviceId = ntohs(whdr.serviceId); | |
248f219c | 999 | sp->offset = sizeof(whdr); |
0d12f8a4 DH |
1000 | return 0; |
1001 | } | |
1002 | ||
17926a79 DH |
1003 | /* |
1004 | * handle data received on the local endpoint | |
1005 | * - may be called in interrupt context | |
4f95dd78 DH |
1006 | * |
1007 | * The socket is locked by the caller and this prevents the socket from being | |
1008 | * shut down and the local endpoint from going away, thus sk_user_data will not | |
1009 | * be cleared until this function returns. | |
17926a79 | 1010 | */ |
248f219c | 1011 | void rxrpc_data_ready(struct sock *udp_sk) |
17926a79 | 1012 | { |
8496af50 | 1013 | struct rxrpc_connection *conn; |
248f219c DH |
1014 | struct rxrpc_channel *chan; |
1015 | struct rxrpc_call *call; | |
17926a79 | 1016 | struct rxrpc_skb_priv *sp; |
248f219c | 1017 | struct rxrpc_local *local = udp_sk->sk_user_data; |
17926a79 | 1018 | struct sk_buff *skb; |
248f219c | 1019 | unsigned int channel; |
563ea7d5 | 1020 | int ret, skew; |
17926a79 | 1021 | |
248f219c | 1022 | _enter("%p", udp_sk); |
17926a79 DH |
1023 | |
1024 | ASSERT(!irqs_disabled()); | |
1025 | ||
248f219c | 1026 | skb = skb_recv_datagram(udp_sk, 0, 1, &ret); |
17926a79 | 1027 | if (!skb) { |
17926a79 DH |
1028 | if (ret == -EAGAIN) |
1029 | return; | |
1030 | _debug("UDP socket error %d", ret); | |
1031 | return; | |
1032 | } | |
1033 | ||
71f3ca40 | 1034 | rxrpc_new_skb(skb, rxrpc_skb_rx_received); |
17926a79 DH |
1035 | |
1036 | _net("recv skb %p", skb); | |
1037 | ||
1038 | /* we'll probably need to checksum it (didn't call sock_recvmsg) */ | |
1039 | if (skb_checksum_complete(skb)) { | |
71f3ca40 | 1040 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
02c22347 | 1041 | __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0); |
17926a79 DH |
1042 | _leave(" [CSUM failed]"); |
1043 | return; | |
1044 | } | |
1045 | ||
02c22347 | 1046 | __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0); |
1781f7f5 | 1047 | |
0d12f8a4 DH |
1048 | /* The socket buffer we have is owned by UDP, with UDP's data all over |
1049 | * it, but we really want our own data there. | |
1050 | */ | |
17926a79 DH |
1051 | skb_orphan(skb); |
1052 | sp = rxrpc_skb(skb); | |
17926a79 | 1053 | |
89b475ab DH |
1054 | /* dig out the RxRPC connection details */ |
1055 | if (rxrpc_extract_header(sp, skb) < 0) | |
1056 | goto bad_message; | |
1057 | ||
8a681c36 DH |
1058 | if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) { |
1059 | static int lose; | |
1060 | if ((lose++ & 7) == 7) { | |
89b475ab | 1061 | trace_rxrpc_rx_lose(sp); |
8a681c36 DH |
1062 | rxrpc_lose_skb(skb, rxrpc_skb_rx_lost); |
1063 | return; | |
1064 | } | |
1065 | } | |
1066 | ||
49e19ec7 | 1067 | trace_rxrpc_rx_packet(sp); |
17926a79 DH |
1068 | |
1069 | _net("Rx RxRPC %s ep=%x call=%x:%x", | |
1070 | sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient", | |
0d12f8a4 | 1071 | sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber); |
17926a79 | 1072 | |
351c1e64 DH |
1073 | if (sp->hdr.type >= RXRPC_N_PACKET_TYPES || |
1074 | !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) { | |
17926a79 DH |
1075 | _proto("Rx Bad Packet Type %u", sp->hdr.type); |
1076 | goto bad_message; | |
1077 | } | |
1078 | ||
248f219c DH |
1079 | switch (sp->hdr.type) { |
1080 | case RXRPC_PACKET_TYPE_VERSION: | |
44ba0698 DH |
1081 | rxrpc_post_packet_to_local(local, skb); |
1082 | goto out; | |
bc6e1ea3 | 1083 | |
248f219c DH |
1084 | case RXRPC_PACKET_TYPE_BUSY: |
1085 | if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) | |
1086 | goto discard; | |
1087 | ||
1088 | case RXRPC_PACKET_TYPE_DATA: | |
1089 | if (sp->hdr.callNumber == 0) | |
1090 | goto bad_message; | |
1091 | if (sp->hdr.flags & RXRPC_JUMBO_PACKET && | |
1092 | !rxrpc_validate_jumbo(skb)) | |
1093 | goto bad_message; | |
1094 | break; | |
1095 | } | |
17926a79 | 1096 | |
8496af50 DH |
1097 | rcu_read_lock(); |
1098 | ||
8496af50 | 1099 | conn = rxrpc_find_connection_rcu(local, skb); |
248f219c DH |
1100 | if (conn) { |
1101 | if (sp->hdr.securityIndex != conn->security_ix) | |
1102 | goto wrong_security; | |
563ea7d5 | 1103 | |
248f219c DH |
1104 | if (sp->hdr.callNumber == 0) { |
1105 | /* Connection-level packet */ | |
1106 | _debug("CONN %p {%d}", conn, conn->debug_id); | |
1107 | rxrpc_post_packet_to_conn(conn, skb); | |
1108 | goto out_unlock; | |
1109 | } | |
1110 | ||
1111 | /* Note the serial number skew here */ | |
1112 | skew = (int)sp->hdr.serial - (int)conn->hi_serial; | |
1113 | if (skew >= 0) { | |
1114 | if (skew > 0) | |
1115 | conn->hi_serial = sp->hdr.serial; | |
1116 | } else { | |
1117 | skew = -skew; | |
1118 | skew = min(skew, 65535); | |
1119 | } | |
17926a79 | 1120 | |
8496af50 | 1121 | /* Call-bound packets are routed by connection channel. */ |
248f219c DH |
1122 | channel = sp->hdr.cid & RXRPC_CHANNELMASK; |
1123 | chan = &conn->channels[channel]; | |
18bfeba5 DH |
1124 | |
1125 | /* Ignore really old calls */ | |
1126 | if (sp->hdr.callNumber < chan->last_call) | |
1127 | goto discard_unlock; | |
1128 | ||
1129 | if (sp->hdr.callNumber == chan->last_call) { | |
248f219c DH |
1130 | /* For the previous service call, if completed successfully, we |
1131 | * discard all further packets. | |
18bfeba5 | 1132 | */ |
2266ffde | 1133 | if (rxrpc_conn_is_service(conn) && |
18bfeba5 DH |
1134 | (chan->last_type == RXRPC_PACKET_TYPE_ACK || |
1135 | sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)) | |
1136 | goto discard_unlock; | |
1137 | ||
248f219c DH |
1138 | /* But otherwise we need to retransmit the final packet from |
1139 | * data cached in the connection record. | |
18bfeba5 DH |
1140 | */ |
1141 | rxrpc_post_packet_to_conn(conn, skb); | |
1142 | goto out_unlock; | |
1143 | } | |
0d12f8a4 | 1144 | |
18bfeba5 | 1145 | call = rcu_dereference(chan->call); |
248f219c DH |
1146 | } else { |
1147 | skew = 0; | |
1148 | call = NULL; | |
1149 | } | |
8496af50 | 1150 | |
248f219c DH |
1151 | if (!call || atomic_read(&call->usage) == 0) { |
1152 | if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) || | |
1153 | sp->hdr.callNumber == 0 || | |
1154 | sp->hdr.type != RXRPC_PACKET_TYPE_DATA) | |
1155 | goto bad_message_unlock; | |
1156 | if (sp->hdr.seq != 1) | |
1157 | goto discard_unlock; | |
1158 | call = rxrpc_new_incoming_call(local, conn, skb); | |
1159 | if (!call) { | |
1160 | rcu_read_unlock(); | |
1161 | goto reject_packet; | |
1162 | } | |
8e83134d | 1163 | rxrpc_send_ping(call, skb, skew); |
7727640c | 1164 | } |
44ba0698 | 1165 | |
248f219c DH |
1166 | rxrpc_input_call_packet(call, skb, skew); |
1167 | goto discard_unlock; | |
1168 | ||
18bfeba5 | 1169 | discard_unlock: |
8496af50 | 1170 | rcu_read_unlock(); |
248f219c | 1171 | discard: |
71f3ca40 | 1172 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
44ba0698 | 1173 | out: |
49e19ec7 | 1174 | trace_rxrpc_rx_done(0, 0); |
17926a79 DH |
1175 | return; |
1176 | ||
248f219c | 1177 | out_unlock: |
8496af50 | 1178 | rcu_read_unlock(); |
248f219c | 1179 | goto out; |
8496af50 | 1180 | |
248f219c DH |
1181 | wrong_security: |
1182 | rcu_read_unlock(); | |
1183 | trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, | |
1184 | RXKADINCONSISTENCY, EBADMSG); | |
1185 | skb->priority = RXKADINCONSISTENCY; | |
1186 | goto post_abort; | |
17926a79 | 1187 | |
248f219c DH |
1188 | bad_message_unlock: |
1189 | rcu_read_unlock(); | |
17926a79 | 1190 | bad_message: |
248f219c DH |
1191 | trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
1192 | RX_PROTOCOL_ERROR, EBADMSG); | |
17926a79 | 1193 | skb->priority = RX_PROTOCOL_ERROR; |
248f219c DH |
1194 | post_abort: |
1195 | skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; | |
49e19ec7 DH |
1196 | reject_packet: |
1197 | trace_rxrpc_rx_done(skb->mark, skb->priority); | |
17926a79 | 1198 | rxrpc_reject_packet(local, skb); |
17926a79 DH |
1199 | _leave(" [badmsg]"); |
1200 | } |