Commit | Line | Data |
---|---|---|
0c3f6f65 XL |
1 | /* SCTP kernel implementation |
2 | * (C) Copyright Red Hat Inc. 2017 | |
3 | * | |
4 | * This file is part of the SCTP kernel implementation | |
5 | * | |
6 | * These functions manipulate sctp stream queue/scheduling. | |
7 | * | |
8 | * This SCTP implementation is free software; | |
9 | * you can redistribute it and/or modify it under the terms of | |
10 | * the GNU General Public License as published by | |
11 | * the Free Software Foundation; either version 2, or (at your option) | |
12 | * any later version. | |
13 | * | |
14 | * This SCTP implementation is distributed in the hope that it | |
15 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | |
16 | * ************************ | |
17 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
18 | * See the GNU General Public License for more details. | |
19 | * | |
20 | * You should have received a copy of the GNU General Public License | |
21 | * along with GNU CC; see the file COPYING. If not, see | |
22 | * <http://www.gnu.org/licenses/>. | |
23 | * | |
24 | * Please send any bug reports or fixes you make to the | |
25 | * email address(es): | |
26 | * lksctp developers <linux-sctp@vger.kernel.org> | |
27 | * | |
28 | * Written or modified by: | |
29 | * Xin Long <lucien.xin@gmail.com> | |
30 | */ | |
31 | ||
bd4d627d | 32 | #include <net/busy_poll.h> |
0c3f6f65 XL |
33 | #include <net/sctp/sctp.h> |
34 | #include <net/sctp/sm.h> | |
bd4d627d | 35 | #include <net/sctp/ulpevent.h> |
0c3f6f65 XL |
36 | #include <linux/sctp.h> |
37 | ||
38 | static struct sctp_chunk *sctp_make_idatafrag_empty( | |
39 | const struct sctp_association *asoc, | |
40 | const struct sctp_sndrcvinfo *sinfo, | |
41 | int len, __u8 flags, gfp_t gfp) | |
42 | { | |
43 | struct sctp_chunk *retval; | |
44 | struct sctp_idatahdr dp; | |
45 | ||
46 | memset(&dp, 0, sizeof(dp)); | |
47 | dp.stream = htons(sinfo->sinfo_stream); | |
48 | ||
49 | if (sinfo->sinfo_flags & SCTP_UNORDERED) | |
50 | flags |= SCTP_DATA_UNORDERED; | |
51 | ||
52 | retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp); | |
53 | if (!retval) | |
54 | return NULL; | |
55 | ||
56 | retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); | |
57 | memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); | |
58 | ||
59 | return retval; | |
60 | } | |
61 | ||
668c9beb XL |
/* Assign a Message Identifier (MID) and per-fragment FSNs to every
 * fragment of the user message that 'chunk' belongs to.  Numbering is
 * done once for the whole message; has_mid marks it as done.
 */
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	/* Already numbered (along with its sibling fragments). */
	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;

		lchunk->has_mid = 1;

		/* Unordered fragments are skipped here and keep the
		 * zeroed header fields from chunk creation.
		 */
		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			continue;

		hdr = lchunk->subh.idata_hdr;

		/* The I-DATA header overlays PPID (first fragment only)
		 * and FSN (all subsequent fragments) in the same field.
		 */
		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		/* Consume the stream's outgoing MID only when the last
		 * fragment is numbered; earlier fragments just peek it.
		 */
		if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
			hdr->mid = htonl(sctp_mid_next(stream, out, sid));
		else
			hdr->mid = htonl(sctp_mid_peek(stream, out, sid));
	}
}
96 | ||
9d4ceaf1 XL |
97 | static bool sctp_validate_data(struct sctp_chunk *chunk) |
98 | { | |
99 | const struct sctp_stream *stream; | |
100 | __u16 sid, ssn; | |
101 | ||
102 | if (chunk->chunk_hdr->type != SCTP_CID_DATA) | |
103 | return false; | |
104 | ||
105 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | |
106 | return true; | |
107 | ||
108 | stream = &chunk->asoc->stream; | |
109 | sid = sctp_chunk_stream_no(chunk); | |
110 | ssn = ntohs(chunk->subh.data_hdr->ssn); | |
111 | ||
112 | return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)); | |
113 | } | |
114 | ||
115 | static bool sctp_validate_idata(struct sctp_chunk *chunk) | |
116 | { | |
117 | struct sctp_stream *stream; | |
118 | __u32 mid; | |
119 | __u16 sid; | |
120 | ||
121 | if (chunk->chunk_hdr->type != SCTP_CID_I_DATA) | |
122 | return false; | |
123 | ||
124 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | |
125 | return true; | |
126 | ||
127 | stream = &chunk->asoc->stream; | |
128 | sid = sctp_chunk_stream_no(chunk); | |
129 | mid = ntohl(chunk->subh.idata_hdr->mid); | |
130 | ||
131 | return !MID_lt(mid, sctp_mid_peek(stream, in, sid)); | |
132 | } | |
133 | ||
bd4d627d XL |
/* Insert an I-DATA fragment event into the reassembly queue, keeping it
 * sorted by (stream, MID, fragment position).  Two tail checks handle
 * the common in-order arrival case without walking the whole queue.
 */
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		/* Empty queue: trivially append. */
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	/* Fast path: same message as the tail, and the new fragment sorts
	 * after it (tail is the FIRST fragment, or both are later
	 * fragments and the new FSN is larger).
	 */
	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Fast path: the new event belongs to a later message or stream
	 * than everything queued.
	 */
	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Slow path: find the first entry that must come after 'event'
	 * and insert in front of it.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		/* Same message: FIRST fragment sorts before non-FIRST,
		 * otherwise order by FSN.
		 */
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
182 | ||
/* For a stream already in partial-delivery mode, pull the next
 * contiguous run of fragments (starting exactly at sin->fsn) of the
 * message currently being delivered (sin->mid), and hand it to the user
 * as one event.  Returns NULL if the next fragment has not arrived yet.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		/* The queue is sorted by stream: skip lower streams, stop
		 * at higher ones or at a different message.
		 */
		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* The FIRST fragment was consumed when PD started;
			 * seeing one here ends the usable run.
			 */
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				/* Run must begin at the FSN the user is
				 * waiting for.
				 */
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				/* Gap in the sequence: stop here. */
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			/* The LAST fragment closes the message either way. */
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		/* Remember where the next PD run must start. */
		sin->fsn = next_fsn;
		if (is_last) {
			/* Message complete: mark EOR and leave PD mode. */
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}
260 | ||
/* Look in the reassembly queue for a complete FIRST..LAST fragment run
 * of event's message.  If no complete message is found but the in-order
 * prefix of the next expected message (sin->mid) has grown past the
 * socket's partial-delivery point, deliver that prefix instead and put
 * the stream into PD mode.  Returns the event to deliver, or NULL.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(ulpq->asoc, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		/* Queue is sorted by (stream, mid); restrict the walk to
		 * event's own message.
		 */
		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* Track a partial-delivery candidate only for the
			 * next message expected on this stream.
			 */
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				/* FSN gap: restart run detection. */
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	/* No complete message; try partial delivery when enough contiguous
	 * data has accumulated (pd_point == 0 disables PD).
	 */
	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	/* Complete message: reassemble FIRST..LAST and mark end-of-record. */
	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
					     &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}
352 | ||
353 | static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq, | |
354 | struct sctp_ulpevent *event) | |
355 | { | |
356 | struct sctp_ulpevent *retval = NULL; | |
357 | struct sctp_stream_in *sin; | |
358 | ||
359 | if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { | |
360 | event->msg_flags |= MSG_EOR; | |
361 | return event; | |
362 | } | |
363 | ||
364 | sctp_intl_store_reasm(ulpq, event); | |
365 | ||
366 | sin = sctp_stream_in(ulpq->asoc, event->stream); | |
367 | if (sin->pd_mode && event->mid == sin->mid && | |
368 | event->fsn == sin->fsn) | |
369 | retval = sctp_intl_retrieve_partial(ulpq, event); | |
370 | ||
371 | if (!retval) | |
372 | retval = sctp_intl_retrieve_reassembled(ulpq, event); | |
373 | ||
374 | return retval; | |
375 | } | |
376 | ||
/* Insert an out-of-order complete message into the lobby, keeping the
 * queue sorted by stream number and by MID within a stream.
 */
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		/* Empty lobby: trivially append. */
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Fast path: new event sorts after the current tail (same stream
	 * with a larger MID, or a higher stream altogether).
	 */
	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Slow path: walk to the first entry that must follow the new
	 * event and insert before it.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream)
			break;

		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid))
			break;
	}

	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
414 | ||
/* After delivering an in-order message, release from the lobby any
 * queued messages on the same stream that have now become deliverable,
 * appending them to the event list 'event' travels on (the list head is
 * stashed in the skb's prev pointer by the caller).
 */
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream = &ulpq->asoc->stream;
	/* Recover the caller's delivery list from skb->prev. */
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		/* Lobby is sorted by stream: stop once past sid. */
		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		/* Only strictly consecutive MIDs can be released. */
		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}
445 | ||
446 | static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq, | |
447 | struct sctp_ulpevent *event) | |
448 | { | |
449 | struct sctp_stream *stream; | |
450 | __u16 sid; | |
451 | ||
452 | if (event->msg_flags & SCTP_DATA_UNORDERED) | |
453 | return event; | |
454 | ||
455 | stream = &ulpq->asoc->stream; | |
456 | sid = event->stream; | |
457 | ||
458 | if (event->mid != sctp_mid_peek(stream, in, sid)) { | |
459 | sctp_intl_store_ordered(ulpq, event); | |
460 | return NULL; | |
461 | } | |
462 | ||
463 | sctp_mid_next(stream, in, sid); | |
464 | ||
465 | sctp_intl_retrieve_ordered(ulpq, event); | |
466 | ||
467 | return event; | |
468 | } | |
469 | ||
/* Deliver an event (or the pre-built list of events whose head was
 * stashed in skb->prev) to the socket receive queue and wake the
 * reader.  Returns 1 on delivery, 0 if the event(s) were dropped.
 */
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sctp_ulpevent *event)
{
	struct sk_buff *skb = sctp_event2skb(event);
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *skb_list;

	/* Optional list of events queued ahead of this one. */
	skb_list = (struct sk_buff_head *)skb->prev;

	/* After RCV_SHUTDOWN only notifications may still flow, and
	 * nothing at all once the socket is shut down in both directions.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		/* Data path: keep busy-poll/NAPI bookkeeping current. */
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Drop event types the user has not subscribed to. */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	/* Signal data readiness only once until the reader drains. */
	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
514 | ||
/* Convert an incoming I-DATA chunk into a ulpevent, run it through
 * reassembly and per-stream ordering, and deliver whatever becomes
 * ready.  Returns 1 if a complete message (MSG_EOR) was delivered,
 * 0 otherwise, or -ENOMEM on allocation failure.
 */
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	/* The I-DATA header overlays PPID (first fragment) and FSN
	 * (later fragments) in the same wire field.
	 */
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	event = sctp_intl_reasm(ulpq, event);
	if (event && event->msg_flags & MSG_EOR) {
		/* Park the complete message on a temporary list so the
		 * ordering code can chain newly released successors onto
		 * it (the list head becomes reachable via skb->prev).
		 */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_intl_order(ulpq, event);
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, event);
	}

	return event_eor;
}
547 | ||
94014e8d XL |
/* Find, on a stream not already in partial delivery, the leading
 * contiguous fragment run (FIRST fragment of the next expected message
 * plus in-sequence MIDDLE fragments) and build a partial event from it,
 * putting that stream into PD mode.  Used when reneging frees space but
 * a delivery must still be forced.  Returns NULL when no stream has a
 * usable leading run.
 */
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		/* Streams already in PD mode are serviced by
		 * sctp_intl_retrieve_partial() instead.
		 */
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* A second FIRST fragment ends the run in hand. */
			if (first_frag)
				goto out;
			/* Only the next message expected on the stream
			 * can start a partial delivery.
			 */
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				/* Gap or different message: the run ends. */
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			/* A run in hand ends here; a lone LAST fragment
			 * cannot begin a partial delivery.
			 */
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		/* sin/sid were set together with first_frag above. */
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}
612 | ||
613 | static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp) | |
614 | { | |
615 | struct sctp_ulpevent *event; | |
616 | ||
617 | if (skb_queue_empty(&ulpq->reasm)) | |
618 | return; | |
619 | ||
620 | do { | |
621 | event = sctp_intl_retrieve_first(ulpq); | |
622 | if (event) | |
623 | sctp_enqueue_event(ulpq, event); | |
624 | } while (event); | |
625 | } | |
626 | ||
/* Make room for an incoming I-DATA chunk by reneging: free queued
 * events from the lobby first and the reassembly queue second, then
 * process the chunk; if it still does not complete a message, force
 * partial delivery to release buffer space to the peer.
 */
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	if (chunk) {
		/* Space needed is the chunk's payload size. */
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(struct sctp_idata_chunk);
	} else {
		needed = SCTP_DEFAULT_MAXWINDOW;
	}

	/* Only renege while the user holds nothing in the receive queue. */
	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
	}

	if (chunk && freed >= needed)
		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
			sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}
654 | ||
65f5e357 XL |
655 | static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid, |
656 | __u32 mid, __u16 flags, gfp_t gfp) | |
657 | { | |
658 | struct sock *sk = ulpq->asoc->base.sk; | |
659 | struct sctp_ulpevent *ev = NULL; | |
660 | ||
661 | if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT, | |
662 | &sctp_sk(sk)->subscribe)) | |
663 | return; | |
664 | ||
665 | ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED, | |
666 | sid, mid, flags, gfp); | |
667 | if (ev) { | |
668 | __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); | |
669 | ||
670 | if (!sctp_sk(sk)->data_ready_signalled) { | |
671 | sctp_sk(sk)->data_ready_signalled = 1; | |
672 | sk->sk_data_ready(sk); | |
673 | } | |
674 | } | |
675 | } | |
676 | ||
/* After partial delivery on stream 'sid' is aborted and its MID skipped
 * forward, release from the lobby every message on that stream whose
 * MID is now behind the expected one, and deliver them (plus any entry
 * that has become exactly the next expected message) to the user.
 */
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		/* Lobby is sorted by stream number. */
		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		/* Stop at the first MID not yet skipped over. */
		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	/* If nothing was reaped but the walk stopped inside the queue,
	 * the entry we stopped at may itself be the next expected
	 * message; if so, deliver it as well.
	 */
	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		/* retrieve_ordered() finds the 'temp' list through the
		 * first skb's prev pointer (set by __skb_queue_tail above)
		 * and appends further in-order messages to it.
		 */
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}
727 | ||
/* Abort partial delivery on every incoming stream that is in PD mode:
 * notify the user, skip the stream past the aborted MID, reap newly
 * deliverable messages, then flush all remaining queued data.
 */
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = &stream->in[sid];
		__u32 mid;

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}
751 | ||
0c3f6f65 XL |
/* Operations used when stream interleaving is disabled: plain DATA
 * chunks with SSN-based ordering, backed by the original ulpq code.
 */
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	/* DATA process functions */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = sctp_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
};
764 | ||
/* Operations used when stream interleaving is enabled: I-DATA chunks
 * with MID/FSN-based reassembly and ordering, implemented in this file.
 */
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	/* I-DATA process functions */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
};
777 | ||
778 | void sctp_stream_interleave_init(struct sctp_stream *stream) | |
779 | { | |
780 | struct sctp_association *asoc; | |
781 | ||
782 | asoc = container_of(stream, struct sctp_association, stream); | |
783 | stream->si = asoc->intl_enable ? &sctp_stream_interleave_1 | |
784 | : &sctp_stream_interleave_0; | |
785 | } |