1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
6 * Copyright (c) 2001 La Monte H.P. Yarroll
8 * This file is part of the SCTP kernel implementation
10 * This module provides the abstraction for an SCTP association.
12 * This SCTP implementation is free software;
13 * you can redistribute it and/or modify it under the terms of
14 * the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
18 * This SCTP implementation is distributed in the hope that it
19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20 * ************************
21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 * See the GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with GNU CC; see the file COPYING. If not, see
26 * <http://www.gnu.org/licenses/>.
28 * Please send any bug reports or fixes you make to the
30 * lksctp developers <linux-sctp@vger.kernel.org>
32 * Written or modified by:
33 * La Monte H.P. Yarroll <piggy@acm.org>
34 * Karl Knutson <karl@athena.chicago.il.us>
35 * Jon Grimm <jgrimm@us.ibm.com>
36 * Xingang Guo <xingang.guo@intel.com>
37 * Hui Huang <hui.huang@nokia.com>
38 * Sridhar Samudrala <sri@us.ibm.com>
39 * Daisy Chang <daisyc@us.ibm.com>
40 * Ryan Layer <rmlayer@us.ibm.com>
41 * Kevin Gao <kevin.gao@intel.com>
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 #include <linux/types.h>
47 #include <linux/fcntl.h>
48 #include <linux/poll.h>
49 #include <linux/init.h>
51 #include <linux/slab.h>
54 #include <net/sctp/sctp.h>
55 #include <net/sctp/sm.h>
57 /* Forward declarations for internal functions. */
58 static void sctp_assoc_bh_rcv(struct work_struct *work);
59 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
60 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62 /* 1st Level Abstractions. */
64 /* Initialize a new association from provided memory. */
65 static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
66 const struct sctp_endpoint *ep,
67 const struct sock *sk,
68 sctp_scope_t scope,
69 gfp_t gfp)
70 {
71 struct net *net = sock_net(sk);
77 /* Retrieve the SCTP per socket area. */
78 sp = sctp_sk((struct sock *)sk);
80 /* Discarding const is appropriate here. */
81 asoc->ep = (struct sctp_endpoint *)ep;
82 asoc->base.sk = (struct sock *)sk;
84 sctp_endpoint_hold(asoc->ep);
85 sock_hold(asoc->base.sk);
87 /* Initialize the common base substructure. */
88 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
90 /* Initialize the object handling fields. */
91 atomic_set(&asoc->base.refcnt, 1);
93 /* Initialize the bind addr area. */
94 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
96 asoc->state = SCTP_STATE_CLOSED;
97 asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
98 asoc->user_frag = sp->user_frag;
100 /* Set the association max_retrans and RTO values from the
103 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
104 asoc->pf_retrans = net->sctp.pf_retrans;
106 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
107 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
108 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
110 /* Initialize the association's heartbeat interval based on the
111 * sock configured value.
113 asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
115 /* Initialize path max retrans value. */
116 asoc->pathmaxrxt = sp->pathmaxrxt;
118 /* Initialize default path MTU. */
119 asoc->pathmtu = sp->pathmtu;
121 /* Set association default SACK delay */
122 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
123 asoc->sackfreq = sp->sackfreq;
125 /* Set the association default flags controlling
126 * Heartbeat, SACK delay, and Path MTU Discovery.
128 asoc->param_flags = sp->param_flags;
130 /* Initialize the maximum number of new data packets that can be sent
133 asoc->max_burst = sp->max_burst;
135 /* initialize association timers */
136 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
137 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
138 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
140 /* sctpimpguide Section 2.12.2
141 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
142 * recommended value of 5 times 'RTO.Max'.
144 asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
147 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
148 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
149 min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
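/* Note: sp->autoclose is specified in seconds; it is clamped to the
 * net.sctp.max_autoclose sysctl and converted to jiffies by the
 * multiplication with HZ above.
 */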
151 /* Initializes the timers */
152 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
153 setup_timer(&asoc->timers[i], sctp_timer_events[i],
154 (unsigned long)asoc);
156 /* Pull default initialization values from the sock options.
157 * Note: This assumes that the values have already been
158 * validated in the sock.
160 asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
161 asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
162 asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
164 asoc->max_init_timeo =
165 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
167 /* Set the local window size for receive.
168 * This is also the rcvbuf space per association.
169 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
170 * 1500 bytes in one SCTP packet.
172 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
173 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
174 else
175 asoc->rwnd = sk->sk_rcvbuf/2;
177 asoc->a_rwnd = asoc->rwnd;
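/* Example: with a 256 KB sk_rcvbuf the initial rwnd (and advertised
 * a_rwnd) is 128 KB; sockets with less than 3000 bytes of receive
 * buffer fall back to the 1500-byte SCTP_DEFAULT_MINWINDOW floor.
 */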
179 /* Use my own max window until I learn something better. */
180 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
182 /* Initialize the receive memory counter */
183 atomic_set(&asoc->rmem_alloc, 0);
185 init_waitqueue_head(&asoc->wait);
187 asoc->c.my_vtag = sctp_generate_tag(ep);
188 asoc->c.my_port = ep->base.bind_addr.port;
190 asoc->c.initial_tsn = sctp_generate_tsn(ep);
192 asoc->next_tsn = asoc->c.initial_tsn;
194 asoc->ctsn_ack_point = asoc->next_tsn - 1;
195 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
196 asoc->highest_sacked = asoc->ctsn_ack_point;
197 asoc->last_cwr_tsn = asoc->ctsn_ack_point;
199 /* ADDIP Section 4.1 Asconf Chunk Procedures
201 * When an endpoint has an ASCONF signaled change to be sent to the
202 * remote endpoint it should do the following:
204 * A2) a serial number should be assigned to the chunk. The serial
205 * number SHOULD be a monotonically increasing number. The serial
206 * numbers SHOULD be initialized at the start of the
207 * association to the same value as the initial TSN.
209 asoc->addip_serial = asoc->c.initial_tsn;
211 INIT_LIST_HEAD(&asoc->addip_chunk_list);
212 INIT_LIST_HEAD(&asoc->asconf_ack_list);
214 /* Make an empty list of remote transport addresses. */
215 INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
217 /* RFC 2960 5.1 Normal Establishment of an Association
219 * After the reception of the first data chunk in an
220 * association the endpoint must immediately respond with a
221 * sack to acknowledge the data chunk. Subsequent
222 * acknowledgements should be done as described in Section
225 * [We implement this by telling a new association that it
226 * already received one packet.]
228 asoc->peer.sack_needed = 1;
229 asoc->peer.sack_generation = 1;
231 /* Assume that the peer will tell us if he recognizes ASCONF
232 * as part of INIT exchange.
233 * The sctp_addip_noauth option is there for backward compatibility
234 * and will revert to the old behavior.
236 if (net->sctp.addip_noauth)
237 asoc->peer.asconf_capable = 1;
239 /* Create an input queue. */
240 sctp_inq_init(&asoc->base.inqueue);
241 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
243 /* Create an output queue. */
244 sctp_outq_init(asoc, &asoc->outqueue);
246 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
247 goto fail_init;
249 /* Assume that peer would support both address types unless we are
252 asoc->peer.ipv4_address = 1;
253 if (asoc->base.sk->sk_family == PF_INET6)
254 asoc->peer.ipv6_address = 1;
255 INIT_LIST_HEAD(&asoc->asocs);
257 asoc->autoclose = sp->autoclose;
259 asoc->default_stream = sp->default_stream;
260 asoc->default_ppid = sp->default_ppid;
261 asoc->default_flags = sp->default_flags;
262 asoc->default_context = sp->default_context;
263 asoc->default_timetolive = sp->default_timetolive;
264 asoc->default_rcv_context = sp->default_rcv_context;
266 /* AUTH related initializations */
267 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
268 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
269 if (err)
270 goto fail_init;
272 asoc->active_key_id = ep->active_key_id;
274 /* Save the hmacs and chunks list into this association */
275 if (ep->auth_hmacs_list)
276 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
277 ntohs(ep->auth_hmacs_list->param_hdr.length));
278 if (ep->auth_chunk_list)
279 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
280 ntohs(ep->auth_chunk_list->param_hdr.length));
282 /* Get the AUTH random number for this association */
283 p = (sctp_paramhdr_t *)asoc->c.auth_random;
284 p->type = SCTP_PARAM_RANDOM;
285 p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
286 get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
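/* The buffer now holds a complete RANDOM parameter (RFC 4895): a
 * parameter header of type SCTP_PARAM_RANDOM followed by
 * SCTP_AUTH_RANDOM_LENGTH bytes of random data, ready to be carried
 * in the INIT/INIT-ACK.
 */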
288 return asoc;
290 fail_init:
291 sock_put(asoc->base.sk);
292 sctp_endpoint_put(asoc->ep);
293 return NULL;
294 }
296 /* Allocate and initialize a new association */
297 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
298 const struct sock *sk,
299 sctp_scope_t scope,
300 gfp_t gfp)
301 {
302 struct sctp_association *asoc;
304 asoc = kzalloc(sizeof(*asoc), gfp);
305 if (!asoc)
306 goto fail;
308 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
309 goto fail_init;
311 SCTP_DBG_OBJCNT_INC(assoc);
313 pr_debug("Created asoc %p\n", asoc);
315 return asoc;
317 fail_init:
318 kfree(asoc);
319 fail:
320 return NULL;
321 }
323 /* Free this association if possible. There may still be users, so
324 * the actual deallocation may be delayed.
326 void sctp_association_free(struct sctp_association *asoc)
328 struct sock *sk = asoc->base.sk;
329 struct sctp_transport *transport;
330 struct list_head *pos, *temp;
333 /* Only real associations count against the endpoint, so
334 * don't bother if this is a temporary association.
337 list_del(&asoc->asocs);
339 /* Decrement the backlog value for a TCP-style listening
342 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
343 sk->sk_ack_backlog--;
346 /* Mark as dead, so other users can know this structure is
349 asoc->base.dead = true;
351 /* Dispose of any data lying around in the outqueue. */
352 sctp_outq_free(&asoc->outqueue);
354 /* Dispose of any pending messages for the upper layer. */
355 sctp_ulpq_free(&asoc->ulpq);
357 /* Dispose of any pending chunks on the inqueue. */
358 sctp_inq_free(&asoc->base.inqueue);
360 sctp_tsnmap_free(&asoc->peer.tsn_map);
362 /* Free ssnmap storage. */
363 sctp_ssnmap_free(asoc->ssnmap);
365 /* Clean up the bound address list. */
366 sctp_bind_addr_free(&asoc->base.bind_addr);
368 /* Do we need to go through all of our timers and
369 * delete them? To be safe we will try to delete all, but we
370 * should be able to go through and make a guess based
371 * on our state.
372 */
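/* Every timer that is still armed holds a reference on the association,
 * so each successful del_timer() below must drop that reference with
 * sctp_association_put().
 */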
373 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
374 if (del_timer(&asoc->timers[i]))
375 sctp_association_put(asoc);
376 }
378 /* Free peer's cached cookie. */
379 kfree(asoc->peer.cookie);
380 kfree(asoc->peer.peer_random);
381 kfree(asoc->peer.peer_chunks);
382 kfree(asoc->peer.peer_hmacs);
384 /* Release the transport structures. */
385 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
386 transport = list_entry(pos, struct sctp_transport, transports);
388 sctp_transport_free(transport);
391 asoc->peer.transport_count = 0;
393 sctp_asconf_queue_teardown(asoc);
395 /* Free pending address space being deleted */
396 if (asoc->asconf_addr_del_pending != NULL)
397 kfree(asoc->asconf_addr_del_pending);
399 /* AUTH - Free the endpoint shared keys */
400 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
402 /* AUTH - Free the association shared key */
403 sctp_auth_key_put(asoc->asoc_shared_key);
405 sctp_association_put(asoc);
408 /* Cleanup and free up an association. */
409 static void sctp_association_destroy(struct sctp_association *asoc)
411 if (unlikely(!asoc->base.dead)) {
412 WARN(1, "Attempt to destroy undead association %p!\n", asoc);
413 return;
414 }
416 sctp_endpoint_put(asoc->ep);
417 sock_put(asoc->base.sk);
419 if (asoc->assoc_id != 0) {
420 spin_lock_bh(&sctp_assocs_id_lock);
421 idr_remove(&sctp_assocs_id, asoc->assoc_id);
422 spin_unlock_bh(&sctp_assocs_id_lock);
425 WARN_ON(atomic_read(&asoc->rmem_alloc));
428 SCTP_DBG_OBJCNT_DEC(assoc);
431 /* Change the primary destination address for the peer. */
432 void sctp_assoc_set_primary(struct sctp_association *asoc,
433 struct sctp_transport *transport)
434 {
435 int changeover = 0;
437 /* it's a changeover only if we already have a primary path
438 * that we are changing
440 if (asoc->peer.primary_path != NULL &&
441 asoc->peer.primary_path != transport)
442 changeover = 1;
444 asoc->peer.primary_path = transport;
446 /* Set a default msg_name for events. */
447 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
448 sizeof(union sctp_addr));
450 /* If the primary path is changing, assume that the
451 * user wants to use this new path.
453 if ((transport->state == SCTP_ACTIVE) ||
454 (transport->state == SCTP_UNKNOWN))
455 asoc->peer.active_path = transport;
458 * SFR-CACC algorithm:
459 * Upon the receipt of a request to change the primary
460 * destination address, on the data structure for the new
461 * primary destination, the sender MUST do the following:
463 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
464 * to this destination address earlier. The sender MUST set
465 * CYCLING_CHANGEOVER to indicate that this switch is a
466 * double switch to the same destination address.
468 * Really, only bother if we have data queued or outstanding on
471 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
472 return;
474 if (transport->cacc.changeover_active)
475 transport->cacc.cycling_changeover = changeover;
477 /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
478 * a changeover has occurred.
480 transport->cacc.changeover_active = changeover;
482 /* 3) The sender MUST store the next TSN to be sent in
483 * next_tsn_at_change.
485 transport->cacc.next_tsn_at_change = asoc->next_tsn;
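/* next_tsn_at_change lets the SFR-CACC SACK processing later tell apart
 * TSNs sent before the changeover from those sent after it.
 */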
488 /* Remove a transport from an association. */
489 void sctp_assoc_rm_peer(struct sctp_association *asoc,
490 struct sctp_transport *peer)
492 struct list_head *pos;
493 struct sctp_transport *transport;
495 pr_debug("%s: association:%p addr:%pISpc\n",
496 __func__, asoc, &peer->ipaddr.sa);
498 /* If we are to remove the current retran_path, update it
499 * to the next peer before removing this peer from the list.
501 if (asoc->peer.retran_path == peer)
502 sctp_assoc_update_retran_path(asoc);
504 /* Remove this peer from the list. */
505 list_del_rcu(&peer->transports);
507 /* Get the first transport of asoc. */
508 pos = asoc->peer.transport_addr_list.next;
509 transport = list_entry(pos, struct sctp_transport, transports);
511 /* Update any entries that match the peer to be deleted. */
512 if (asoc->peer.primary_path == peer)
513 sctp_assoc_set_primary(asoc, transport);
514 if (asoc->peer.active_path == peer)
515 asoc->peer.active_path = transport;
516 if (asoc->peer.retran_path == peer)
517 asoc->peer.retran_path = transport;
518 if (asoc->peer.last_data_from == peer)
519 asoc->peer.last_data_from = transport;
521 /* If we remove the transport an INIT was last sent to, set it to
522 * NULL. Combined with the update of the retran path above, this
523 * will cause the next INIT to be sent to the next available
524 * transport, maintaining the cycle.
526 if (asoc->init_last_sent_to == peer)
527 asoc->init_last_sent_to = NULL;
529 /* If we remove the transport an SHUTDOWN was last sent to, set it
530 * to NULL. Combined with the update of the retran path above, this
531 * will cause the next SHUTDOWN to be sent to the next available
532 * transport, maintaining the cycle.
534 if (asoc->shutdown_last_sent_to == peer)
535 asoc->shutdown_last_sent_to = NULL;
537 /* If we remove the transport an ASCONF was last sent to, set it to
540 if (asoc->addip_last_asconf &&
541 asoc->addip_last_asconf->transport == peer)
542 asoc->addip_last_asconf->transport = NULL;
544 /* If we have something on the transmitted list, we have to
545 * save it off. The best place is the active path.
547 if (!list_empty(&peer->transmitted)) {
548 struct sctp_transport *active = asoc->peer.active_path;
549 struct sctp_chunk *ch;
551 /* Reset the transport of each chunk on this list */
552 list_for_each_entry(ch, &peer->transmitted,
553 transmitted_list) {
554 ch->transport = NULL;
555 ch->rtt_in_progress = 0;
556 }
558 list_splice_tail_init(&peer->transmitted,
559 &active->transmitted);
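/* Note: mod_timer() returns 0 when the timer was not already pending,
 * in which case an extra reference is taken on the active transport
 * below to account for the newly armed T3-rtx timer.
 */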
561 /* Start a T3 timer here in case it wasn't running so
562 * that these migrated packets have a chance to get
563 * retransmitted.
564 */
565 if (!timer_pending(&active->T3_rtx_timer))
566 if (!mod_timer(&active->T3_rtx_timer,
567 jiffies + active->rto))
568 sctp_transport_hold(active);
569 }
571 asoc->peer.transport_count--;
573 sctp_transport_free(peer);
576 /* Add a transport address to an association. */
577 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
578 const union sctp_addr *addr,
580 const int peer_state)
582 struct net *net = sock_net(asoc->base.sk);
583 struct sctp_transport *peer;
584 struct sctp_sock *sp;
587 sp = sctp_sk(asoc->base.sk);
589 /* AF_INET and AF_INET6 share common port field. */
590 port = ntohs(addr->v4.sin_port);
592 pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
593 asoc, &addr->sa, peer_state);
595 /* Set the port if it has not been set yet. */
596 if (0 == asoc->peer.port)
597 asoc->peer.port = port;
599 /* Check to see if this is a duplicate. */
600 peer = sctp_assoc_lookup_paddr(asoc, addr);
601 if (peer) {
602 /* An UNKNOWN state is only set on transports added by
603 * user in sctp_connectx() call. Such transports should be
604 * considered CONFIRMED per RFC 4960, Section 5.4.
605 */
606 if (peer->state == SCTP_UNKNOWN) {
607 peer->state = SCTP_ACTIVE;
608 }
609 return peer;
610 }
612 peer = sctp_transport_new(net, addr, gfp);
613 if (!peer)
614 return NULL;
616 sctp_transport_set_owner(peer, asoc);
618 /* Initialize the peer's heartbeat interval based on the
619 * association configured value.
621 peer->hbinterval = asoc->hbinterval;
623 /* Set the path max_retrans. */
624 peer->pathmaxrxt = asoc->pathmaxrxt;
626 /* And the partial failure retrans threshold */
627 peer->pf_retrans = asoc->pf_retrans;
629 /* Initialize the peer's SACK delay timeout based on the
630 * association configured value.
632 peer->sackdelay = asoc->sackdelay;
633 peer->sackfreq = asoc->sackfreq;
635 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
636 * based on association setting.
638 peer->param_flags = asoc->param_flags;
640 sctp_transport_route(peer, NULL, sp);
642 /* Initialize the pmtu of the transport. */
643 if (peer->param_flags & SPP_PMTUD_DISABLE) {
644 if (asoc->pathmtu)
645 peer->pathmtu = asoc->pathmtu;
646 else
647 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
648 }
650 /* If this is the first transport addr on this association,
651 * initialize the association PMTU to the peer's PMTU.
652 * If not and the current association PMTU is higher than the new
653 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
654 */
655 if (asoc->pathmtu)
656 asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
657 else
658 asoc->pathmtu = peer->pathmtu;
660 pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
663 peer->pmtu_pending = 0;
665 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
667 /* The asoc->peer.port might not be meaningful yet, but
668 * initialize the packet structure anyway.
670 sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
675 * o The initial cwnd before DATA transmission or after a sufficiently
676 * long idle period MUST be set to
677 * min(4*MTU, max(2*MTU, 4380 bytes))
679 * o The initial value of ssthresh MAY be arbitrarily high
680 * (for example, implementations MAY use the size of the
681 * receiver advertised window).
683 peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
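/* Worked example: for a 1500-byte path MTU this gives
 * min(4*1500, max(2*1500, 4380)) = min(6000, 4380) = 4380 bytes.
 */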
685 /* At this point, we may not have the receiver's advertised window,
686 * so initialize ssthresh to the default value and it will be set
687 * later when we process the INIT.
689 peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
691 peer->partial_bytes_acked = 0;
692 peer->flight_size = 0;
693 peer->burst_limited = 0;
695 /* Set the transport's RTO.initial value */
696 peer->rto = asoc->rto_initial;
697 sctp_max_rto(asoc, peer);
699 /* Set the peer's active state. */
700 peer->state = peer_state;
702 /* Attach the remote transport to our asoc. */
703 list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
704 asoc->peer.transport_count++;
706 /* If we do not yet have a primary path, set one. */
707 if (!asoc->peer.primary_path) {
708 sctp_assoc_set_primary(asoc, peer);
709 asoc->peer.retran_path = peer;
712 if (asoc->peer.active_path == asoc->peer.retran_path &&
713 peer->state != SCTP_UNCONFIRMED) {
714 asoc->peer.retran_path = peer;
720 /* Delete a transport address from an association. */
721 void sctp_assoc_del_peer(struct sctp_association *asoc,
722 const union sctp_addr *addr)
724 struct list_head *pos;
725 struct list_head *temp;
726 struct sctp_transport *transport;
728 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
729 transport = list_entry(pos, struct sctp_transport, transports);
730 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
731 /* Do book keeping for removing the peer and free it. */
732 sctp_assoc_rm_peer(asoc, transport);
738 /* Lookup a transport by address. */
739 struct sctp_transport *sctp_assoc_lookup_paddr(
740 const struct sctp_association *asoc,
741 const union sctp_addr *address)
743 struct sctp_transport *t;
745 /* Cycle through all transports searching for a peer address. */
747 list_for_each_entry(t, &asoc->peer.transport_addr_list,
749 if (sctp_cmp_addr_exact(address, &t->ipaddr))
756 /* Remove all transports except a given one */
757 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
758 struct sctp_transport *primary)
760 struct sctp_transport *temp;
761 struct sctp_transport *t;
763 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
765 /* if the current transport is not the primary one, delete it */
767 sctp_assoc_rm_peer(asoc, t);
771 /* Engage in transport control operations.
772 * Mark the transport up or down and send a notification to the user.
773 * Select and update the new active and retran paths.
775 void sctp_assoc_control_transport(struct sctp_association *asoc,
776 struct sctp_transport *transport,
777 sctp_transport_cmd_t command,
778 sctp_sn_error_t error)
780 struct sctp_transport *t = NULL;
781 struct sctp_transport *first;
782 struct sctp_transport *second;
783 struct sctp_ulpevent *event;
784 struct sockaddr_storage addr;
786 bool ulp_notify = true;
788 /* Record the transition on the transport. */
789 switch (command) {
790 case SCTP_TRANSPORT_UP:
791 /* If we are moving from UNCONFIRMED state due
792 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
793 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
795 if (SCTP_UNCONFIRMED == transport->state &&
796 SCTP_HEARTBEAT_SUCCESS == error)
797 spc_state = SCTP_ADDR_CONFIRMED;
799 spc_state = SCTP_ADDR_AVAILABLE;
800 /* Don't inform ULP about transition from PF to
801 * active state and set cwnd to 1 MTU, see SCTP
802 * Quick failover draft section 5.1, point 5
804 if (transport->state == SCTP_PF) {
805 ulp_notify = false;
806 transport->cwnd = asoc->pathmtu;
807 }
808 transport->state = SCTP_ACTIVE;
809 break;
811 case SCTP_TRANSPORT_DOWN:
812 /* If the transport was never confirmed, do not transition it
813 * to inactive state. Also, release the cached route since
814 * there may be a better route next time.
816 if (transport->state != SCTP_UNCONFIRMED)
817 transport->state = SCTP_INACTIVE;
818 else {
819 dst_release(transport->dst);
820 transport->dst = NULL;
821 }
823 spc_state = SCTP_ADDR_UNREACHABLE;
824 break;
826 case SCTP_TRANSPORT_PF:
827 transport->state = SCTP_PF;
835 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
836 * user.
837 */
838 if (ulp_notify) {
839 memset(&addr, 0, sizeof(struct sockaddr_storage));
840 memcpy(&addr, &transport->ipaddr,
841 transport->af_specific->sockaddr_len);
842 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
843 0, spc_state, error, GFP_ATOMIC);
844 if (event)
845 sctp_ulpq_tail_event(&asoc->ulpq, event);
846 }
848 /* Select new active and retran paths. */
850 /* Look for the two most recently used active transports.
852 * This code produces the wrong ordering whenever jiffies
853 * rolls over, but we still get usable transports, so we don't
856 first = NULL; second = NULL;
858 list_for_each_entry(t, &asoc->peer.transport_addr_list,
861 if ((t->state == SCTP_INACTIVE) ||
862 (t->state == SCTP_UNCONFIRMED) ||
863 (t->state == SCTP_PF))
864 continue;
865 if (!first || t->last_time_heard > first->last_time_heard) {
866 second = first;
867 first = t;
868 } else if (!second ||
869 t->last_time_heard > second->last_time_heard)
870 second = t;
871 }
873 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
875 * By default, an endpoint should always transmit to the
876 * primary path, unless the SCTP user explicitly specifies the
877 * destination transport address (and possibly source
878 * transport address) to use.
880 * [If the primary is active but not most recent, bump the most
881 * recently used transport.]
883 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
884 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
885 first != asoc->peer.primary_path) {
887 first = asoc->peer.primary_path;
892 /* If we failed to find a usable transport, just camp on the
893 * primary, even if it is inactive.
896 first = asoc->peer.primary_path;
897 second = asoc->peer.primary_path;
900 /* Set the active and retran transports. */
901 asoc->peer.active_path = first;
902 asoc->peer.retran_path = second;
905 /* Hold a reference to an association. */
906 void sctp_association_hold(struct sctp_association *asoc)
908 atomic_inc(&asoc->base.refcnt);
911 /* Release a reference to an association and cleanup
912 * if there are no more references.
914 void sctp_association_put(struct sctp_association *asoc)
916 if (atomic_dec_and_test(&asoc->base.refcnt))
917 sctp_association_destroy(asoc);
920 /* Allocate the next TSN, Transmission Sequence Number, for the given
923 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
925 /* From Section 1.6 Serial Number Arithmetic:
926 * Transmission Sequence Numbers wrap around when they reach
927 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
928 * after transmitting TSN = 2**32 - 1 is TSN = 0.
930 __u32 retval = asoc->next_tsn;
937 /* Compare two addresses to see if they match. Wildcard addresses
938 * only match themselves.
940 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
941 const union sctp_addr *ss2)
945 af = sctp_get_af_specific(ss1->sa.sa_family);
949 return af->cmp_addr(ss1, ss2);
952 /* Return an ecne chunk to get prepended to a packet.
953 * Note: We are sly and return a shared, prealloced chunk. FIXME:
954 * No we don't, but we could/should.
956 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
958 if (!asoc->need_ecne)
961 /* Send ECNE if needed.
962 * Not being able to allocate a chunk here is not deadly.
964 return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
968 * Find which transport this TSN was sent on.
970 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
973 struct sctp_transport *active;
974 struct sctp_transport *match;
975 struct sctp_transport *transport;
976 struct sctp_chunk *chunk;
977 __be32 key = htonl(tsn);
982 * FIXME: In general, find a more efficient data structure for
987 * The general strategy is to search each transport's transmitted
988 * list. Return which transport this TSN lives on.
990 * Let's be hopeful and check the active_path first.
991 * Another optimization would be to know if there is only one
992 * outbound path and not have to look for the TSN at all.
996 active = asoc->peer.active_path;
998 list_for_each_entry(chunk, &active->transmitted,
1001 if (key == chunk->subh.data_hdr->tsn) {
1007 /* If not found, go search all the other transports. */
1008 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
1011 if (transport == active)
1013 list_for_each_entry(chunk, &transport->transmitted,
1015 if (key == chunk->subh.data_hdr->tsn) {
1025 /* Is this the association we are looking for? */
1026 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1028 const union sctp_addr *laddr,
1029 const union sctp_addr *paddr)
1031 struct sctp_transport *transport;
1033 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1034 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
1035 net_eq(sock_net(asoc->base.sk), net)) {
1036 transport = sctp_assoc_lookup_paddr(asoc, paddr);
1040 if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1041 sctp_sk(asoc->base.sk)))
1050 /* Do delayed input processing. This is scheduled by sctp_rcv(). */
1051 static void sctp_assoc_bh_rcv(struct work_struct *work)
1053 struct sctp_association *asoc =
1054 container_of(work, struct sctp_association,
1055 base.inqueue.immediate);
1056 struct net *net = sock_net(asoc->base.sk);
1057 struct sctp_endpoint *ep;
1058 struct sctp_chunk *chunk;
1059 struct sctp_inq *inqueue;
1061 sctp_subtype_t subtype;
1064 /* The association should be held so we should be safe. */
1067 inqueue = &asoc->base.inqueue;
1068 sctp_association_hold(asoc);
1069 while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1070 state = asoc->state;
1071 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1073 /* SCTP-AUTH, Section 6.3:
1074 * The receiver has a list of chunk types which it expects
1075 * to be received only after an AUTH-chunk. This list has
1076 * been sent to the peer during the association setup. It
1077 * MUST silently discard these chunks if they are not placed
1078 * after an AUTH chunk in the packet.
1080 if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1083 /* Remember where the last DATA chunk came from so we
1084 * know where to send the SACK.
1086 if (sctp_chunk_is_data(chunk))
1087 asoc->peer.last_data_from = chunk->transport;
1089 SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
1090 asoc->stats.ictrlchunks++;
1091 if (chunk->chunk_hdr->type == SCTP_CID_SACK)
1092 asoc->stats.isacks++;
1095 if (chunk->transport)
1096 chunk->transport->last_time_heard = jiffies;
1098 /* Run through the state machine. */
1099 error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
1100 state, ep, asoc, chunk, GFP_ATOMIC);
1102 /* Check to see if the association is freed in response to
1103 * the incoming chunk. If so, get out of the while loop.
1105 if (asoc->base.dead)
1108 /* If there is an error on chunk, discard this packet. */
1110 chunk->pdiscard = 1;
1112 sctp_association_put(asoc);
1115 /* This routine moves an association from its old sk to a new sk. */
1116 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1118 struct sctp_sock *newsp = sctp_sk(newsk);
1119 struct sock *oldsk = assoc->base.sk;
1121 /* Delete the association from the old endpoint's list of
1124 list_del_init(&assoc->asocs);
1126 /* Decrement the backlog value for a TCP-style socket. */
1127 if (sctp_style(oldsk, TCP))
1128 oldsk->sk_ack_backlog--;
1130 /* Release references to the old endpoint and the sock. */
1131 sctp_endpoint_put(assoc->ep);
1132 sock_put(assoc->base.sk);
1134 /* Get a reference to the new endpoint. */
1135 assoc->ep = newsp->ep;
1136 sctp_endpoint_hold(assoc->ep);
1138 /* Get a reference to the new sock. */
1139 assoc->base.sk = newsk;
1140 sock_hold(assoc->base.sk);
1142 /* Add the association to the new endpoint's list of associations. */
1143 sctp_endpoint_add_asoc(newsp->ep, assoc);
1146 /* Update an association (possibly from unexpected COOKIE-ECHO processing). */
1147 void sctp_assoc_update(struct sctp_association *asoc,
1148 struct sctp_association *new)
1150 struct sctp_transport *trans;
1151 struct list_head *pos, *temp;
1153 /* Copy in new parameters of peer. */
1155 asoc->peer.rwnd = new->peer.rwnd;
1156 asoc->peer.sack_needed = new->peer.sack_needed;
1157 asoc->peer.i = new->peer.i;
1158 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1159 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1161 /* Remove any peer addresses not present in the new association. */
1162 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1163 trans = list_entry(pos, struct sctp_transport, transports);
1164 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1165 sctp_assoc_rm_peer(asoc, trans);
1169 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1170 sctp_transport_reset(trans);
1173 /* If the case is A (association restart), use
1174 * initial_tsn as next_tsn. If the case is B, use
1175 * current next_tsn in case data sent to peer
1176 * has been discarded and needs retransmission.
1178 if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1179 asoc->next_tsn = new->next_tsn;
1180 asoc->ctsn_ack_point = new->ctsn_ack_point;
1181 asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1183 /* Reinitialize SSN for both local streams
1184 * and peer's streams.
1186 sctp_ssnmap_clear(asoc->ssnmap);
1188 /* Flush the ULP reassembly and ordered queue.
1189 * Any data there will now be stale and will
1192 sctp_ulpq_flush(&asoc->ulpq);
1194 /* reset the overall association error count so
1195 * that the restarted association doesn't get torn
1196 * down on the next retransmission timer.
1198 asoc->overall_error_count = 0;
1201 /* Add any peer addresses from the new association. */
1202 list_for_each_entry(trans, &new->peer.transport_addr_list,
1204 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1205 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1206 GFP_ATOMIC, trans->state);
1209 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1210 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1211 if (!asoc->ssnmap) {
1212 /* Move the ssnmap. */
1213 asoc->ssnmap = new->ssnmap;
1217 if (!asoc->assoc_id) {
1218 /* get a new association id since we don't have one
1221 sctp_assoc_set_id(asoc, GFP_ATOMIC);
1225 /* SCTP-AUTH: Save the peer parameters from the new association
1226 * and also move the association shared keys over
1228 kfree(asoc->peer.peer_random);
1229 asoc->peer.peer_random = new->peer.peer_random;
1230 new->peer.peer_random = NULL;
1232 kfree(asoc->peer.peer_chunks);
1233 asoc->peer.peer_chunks = new->peer.peer_chunks;
1234 new->peer.peer_chunks = NULL;
1236 kfree(asoc->peer.peer_hmacs);
1237 asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1238 new->peer.peer_hmacs = NULL;
1240 sctp_auth_key_put(asoc->asoc_shared_key);
1241 sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1244 /* Update the retran path for sending a retransmitted packet.
1245 * Round-robin through the active transports, else round-robin
1246 * through the inactive transports as this is the next best thing
1249 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1251 struct sctp_transport *t, *next;
1252 struct list_head *head = &asoc->peer.transport_addr_list;
1253 struct list_head *pos;
1255 if (asoc->peer.transport_count == 1)
1258 /* Find the next transport in a round-robin fashion. */
1259 t = asoc->peer.retran_path;
1260 pos = &t->transports;
1264 /* Skip the head. */
1265 if (pos->next == head)
1270 t = list_entry(pos, struct sctp_transport, transports);
1272 /* We have exhausted the list, but didn't find any
1273 * other active transports. If so, use the next
1276 if (t == asoc->peer.retran_path) {
1281 /* Try to find an active transport. */
1283 if ((t->state == SCTP_ACTIVE) ||
1284 (t->state == SCTP_UNKNOWN)) {
1287 /* Keep track of the next transport in case
1288 * we don't find any active transport.
1290 if (t->state != SCTP_UNCONFIRMED && !next)
1296 asoc->peer.retran_path = t;
1298 t = asoc->peer.retran_path;
1300 pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
1304 /* Choose the transport for sending retransmit packet. */
1305 struct sctp_transport *sctp_assoc_choose_alter_transport(
1306 struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1308 /* If this is the first time packet is sent, use the active path,
1309 * else use the retran path. If the last packet was sent over the
1310 * retran path, update the retran path and use it.
1311 */
1312 if (!last_sent_to)
1313 return asoc->peer.active_path;
1315 if (last_sent_to == asoc->peer.retran_path)
1316 sctp_assoc_update_retran_path(asoc);
1317 return asoc->peer.retran_path;
1321 /* Update the association's pmtu and frag_point by going through all the
1322 * transports. This routine is called when a transport's PMTU has changed.
1324 void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1326 struct sctp_transport *t;
1332 /* Get the lowest pmtu of all the transports. */
1333 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1335 if (t->pmtu_pending && t->dst) {
1336 sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1337 t->pmtu_pending = 0;
1339 if (!pmtu || (t->pathmtu < pmtu))
1340 pmtu = t->pathmtu;
1344 asoc->pathmtu = pmtu;
1345 asoc->frag_point = sctp_frag_point(asoc, pmtu);
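/* frag_point caps how much user data a single DATA chunk may carry for
 * this path MTU, i.e. the MTU minus the IP, SCTP and DATA chunk header
 * overhead; larger messages are fragmented across multiple chunks.
 */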
1348 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1349 asoc->pathmtu, asoc->frag_point);
1352 /* Should we send a SACK to update our peer? */
1353 static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1355 struct net *net = sock_net(asoc->base.sk);
1356 switch (asoc->state) {
1357 case SCTP_STATE_ESTABLISHED:
1358 case SCTP_STATE_SHUTDOWN_PENDING:
1359 case SCTP_STATE_SHUTDOWN_RECEIVED:
1360 case SCTP_STATE_SHUTDOWN_SENT:
1361 if ((asoc->rwnd > asoc->a_rwnd) &&
1362 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1363 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1373 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1374 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1376 struct sctp_chunk *sack;
1377 struct timer_list *timer;
1379 if (asoc->rwnd_over) {
1380 if (asoc->rwnd_over >= len) {
1381 asoc->rwnd_over -= len;
1383 asoc->rwnd += (len - asoc->rwnd_over);
1384 asoc->rwnd_over = 0;
1390 /* If we had window pressure, start recovering it
1391 * once our rwnd had reached the accumulated pressure
1392 * threshold. The idea is to recover slowly, but up
1393 * to the initial advertised window.
1395 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1396 int change = min(asoc->pathmtu, asoc->rwnd_press);
1397 asoc->rwnd += change;
1398 asoc->rwnd_press -= change;
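/* Example: if 9000 bytes of pressure had accumulated on a 1500-byte
 * PMTU path, each call restores at most 1500 bytes (one PMTU) until
 * rwnd_press is fully drained.
 */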
1401 pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
1402 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1405 /* Send a window update SACK if the rwnd has increased by at least the
1406 * minimum of the association's PMTU and half of the receive buffer.
1407 * The algorithm used is similar to the one described in
1408 * Section 4.2.3.3 of RFC 1122.
1410 if (sctp_peer_needs_update(asoc)) {
1411 asoc->a_rwnd = asoc->rwnd;
1413 pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
1414 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
1417 sack = sctp_make_sack(asoc);
1421 asoc->peer.sack_needed = 0;
1423 sctp_outq_tail(&asoc->outqueue, sack);
1425 /* Stop the SACK timer. */
1426 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1427 if (del_timer(timer))
1428 sctp_association_put(asoc);
1432 /* Decrease asoc's rwnd by len. */
1433 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1438 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1439 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1440 "asoc->rwnd_over:%u!\n", __func__, asoc,
1441 asoc->rwnd, asoc->rwnd_over);
1443 if (asoc->ep->rcvbuf_policy)
1444 rx_count = atomic_read(&asoc->rmem_alloc);
1446 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1448 /* If we've reached or overflowed our receive buffer, announce
1449 * a 0 rwnd if rwnd would still be positive. Store the
1450 * potential pressure overflow so that the window can be restored
1451 * back to its original value.
1453 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1456 if (asoc->rwnd >= len) {
1459 asoc->rwnd_press += asoc->rwnd;
1463 asoc->rwnd_over = len - asoc->rwnd;
1467 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1468 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1472 /* Build the bind address list for the association based on info from the
1473 * local endpoint and the remote peer.
1475 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1476 sctp_scope_t scope, gfp_t gfp)
1480 /* Use scoping rules to determine the subset of addresses from
1481 * the endpoint.
1482 */
1483 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1484 if (asoc->peer.ipv4_address)
1485 flags |= SCTP_ADDR4_PEERSUPP;
1486 if (asoc->peer.ipv6_address)
1487 flags |= SCTP_ADDR6_PEERSUPP;
1489 return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1490 &asoc->base.bind_addr,
1491 &asoc->ep->base.bind_addr,
1495 /* Build the association's bind address list from the cookie. */
1496 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1497 struct sctp_cookie *cookie,
1500 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1501 int var_size3 = cookie->raw_addr_list_len;
1502 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
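/* Cookie layout: the peer's original INIT chunk sits at the start of
 * the cookie, and the packed raw address list (raw_addr_list_len bytes)
 * follows immediately after it, hence the pointer arithmetic above.
 */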
1504 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1505 asoc->ep->base.bind_addr.port, gfp);
1508 /* Lookup laddr in the bind address list of an association. */
1509 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1510 const union sctp_addr *laddr)
1514 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1515 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1516 sctp_sk(asoc->base.sk)))
1522 /* Set an association id for a given association */
1523 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1525 bool preload = gfp & __GFP_WAIT;
1528 /* If the id is already assigned, keep it. */
1534 spin_lock_bh(&sctp_assocs_id_lock);
1535 /* 0 is not a valid assoc_id, must be >= 1 */
1536 ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
1537 spin_unlock_bh(&sctp_assocs_id_lock);
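/* The allocation itself runs under the spinlock with GFP_NOWAIT; when
 * the caller's gfp allows sleeping, idr memory is preloaded beforehand
 * (see the 'preload' flag above) so the non-blocking allocation can
 * still succeed.
 */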
1543 asoc->assoc_id = (sctp_assoc_t)ret;
1547 /* Free the ASCONF queue */
1548 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1550 struct sctp_chunk *asconf;
1551 struct sctp_chunk *tmp;
1553 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1554 list_del_init(&asconf->list);
1555 sctp_chunk_free(asconf);
1559 /* Free asconf_ack cache */
1560 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1562 struct sctp_chunk *ack;
1563 struct sctp_chunk *tmp;
1565 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1567 list_del_init(&ack->transmitted_list);
1568 sctp_chunk_free(ack);
1572 /* Clean up the ASCONF_ACK queue */
1573 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1575 struct sctp_chunk *ack;
1576 struct sctp_chunk *tmp;
1578 /* We can remove all the entries from the queue up to
1579 * the "Peer-Sequence-Number".
1581 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1583 if (ack->subh.addip_hdr->serial ==
1584 htonl(asoc->peer.addip_serial))
1587 list_del_init(&ack->transmitted_list);
1588 sctp_chunk_free(ack);
1592 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1593 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1594 const struct sctp_association *asoc,
1597 struct sctp_chunk *ack;
1599 /* Walk through the list of cached ASCONF-ACKs and find the
1600 * ack chunk whose serial number matches that of the request.
1602 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1603 if (ack->subh.addip_hdr->serial == serial) {
1604 sctp_chunk_hold(ack);
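/* Take a reference on the cached ASCONF-ACK before handing it back;
 * the caller is responsible for dropping it when done.
 */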
1612 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1614 /* Free any cached ASCONF_ACK chunk. */
1615 sctp_assoc_free_asconf_acks(asoc);
1617 /* Free the ASCONF queue. */
1618 sctp_assoc_free_asconf_queue(asoc);
1620 /* Free any cached ASCONF chunk. */
1621 if (asoc->addip_last_asconf)
1622 sctp_chunk_free(asoc->addip_last_asconf);
1623 }