/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"

#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000
#define STARTING_EVT    856384768	/* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u		/* rx'd ??? */
#define TIMEOUT_EVT     560817u		/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol.
 */
#define ORIGINAL_MSG 1
#define DUPLICATE_MSG 2
/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u
static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
static int  tipc_link_tunnel_rcv(struct tipc_node *node,
				 struct sk_buff **skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
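/* Example of the rounding done by align() above: align(1) == 4,
 * align(4) == 4, align(5) == 8. Adding 3 and clearing the two low-order
 * bits rounds any size up to the next multiple of four, the alignment
 * used when stepping through bundled messages.
 */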
static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}

static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_node *node = l_ptr->owner;
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}
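/* Worked example for link_init_max_pkt(), assuming an Ethernet bearer
 * with mtu 1500: max_pkt_target becomes 1500 & ~3 = 1500, and the link
 * starts out with max_pkt = min(max_pkt_target, MAX_PKT_DEFAULT). The
 * probe logic in tipc_link_proto_xmit()/tipc_link_proto_rcv() then grows
 * max_pkt toward max_pkt_target while the link is up.
 */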
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return (l_ptr->owner->active_links[0] == l_ptr) ||
	       (l_ptr->owner->active_links[1] == l_ptr);
}
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
	tipc_link_put(l_ptr);
}
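/* The length profile above is a simple histogram over the bounds 64, 256,
 * 1024, 4096, 16384 and 32768 bytes, with profile[6] counting anything
 * larger. Together with msg_lengths_total and msg_length_counts it lets
 * the statistics dump derive an average sent-message size per link.
 */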
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}
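/* The link name built above has the form
 * <own zone>.<cluster>.<node>:<bearer if>-<peer zone>.<cluster>.<node>:unknown,
 * e.g. "1.1.1:eth0-1.1.2:unknown" for a link from node 1.1.1 over eth0
 * (example addresses assumed). The "unknown" peer interface name is
 * replaced once a RESET/ACTIVATE message carrying it is received.
 */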
/**
 * tipc_link_delete - Conditional deletion of link.
 *                    If timer still running, real delete is done when it expires
 * @link: link to be deleted
 */
void tipc_link_delete(struct tipc_link *link)
{
	tipc_link_reset_fragments(link);
	tipc_node_detach_link(link->owner, link);
	tipc_link_put(link);
}
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;
	bool del_link;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
		if (!link) {
			tipc_node_unlock(node);
			continue;
		}
		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
		tipc_link_reset(link);
		if (del_timer(&link->timer))
			tipc_link_put(link);
		link->flags |= LINK_STOPPED;
		/* Delete link now, or when failover is finished: */
		if (shutting_down || !tipc_node_is_up(node) || del_link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
/**
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 * Create pseudo msg to send back to user when congestion abates
 */
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
{
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      link_own_addr(link), link_own_addr(link),
			      oport, 0, 0);
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, buf);
	link->stats.link_congs++;
	return true;
}
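/* The SOCK_WAKEUP buffer built above is a pseudo message: it never leaves
 * the node. It is parked on the link's wakeupq and later moved toward the
 * blocked socket's input queue by link_prepare_wakeup(), where the stored
 * chain_sz/chain_imp decide how much send-queue space must be free before
 * that sender is woken.
 */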
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *link)
{
	uint pend_qsz = skb_queue_len(&link->backlogq);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
			break;
		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
		skb_unlink(skb, &link->wakeupq);
		skb_queue_tail(&link->inputq, skb);
		link->owner->inputq = &link->inputq;
		link->owner->action_flags |= TIPC_MSG_EVT;
	}
}
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->backlogq);
	tipc_link_reset_fragments(l_ptr);
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->backlogq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	l_ptr->rcv_unacked = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->next_in_no = 1;
	link->stats.recv_info = 1;
	tipc_node_link_up(node, link);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long cont_intv = l_ptr->cont_intv;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
	int imp = msg_importance(msg);
	u32 oport = msg_tot_origport(msg);

	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto drop;
	}
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
	if (TIPC_SKB_CB(skb)->wakeup_pending)
		return -ELINKCONG;
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
		return -ELINKCONG;
drop:
	__skb_queue_purge(list);
	return -EHOSTUNREACH;
}
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int imp = msg_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *tmp;

	/* Match queue limit against msg importance: */
	if (unlikely(skb_queue_len(backlogq) >= link->queue_limit[imp]))
		return tipc_link_cong(link, list);

	/* Has valid packet limit been used ? */
	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
		}
		__skb_queue_tail(backlogq, skb);
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}
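/* Send-side queueing in __tipc_link_xmit(), by example (assumed numbers):
 * with window maxwin = 50 and transmq already holding 50 packets, a new
 * packet is first offered to the bundle at the tail of backlogq
 * (tipc_msg_bundle); if it does not fit, a fresh bundle may be created
 * around it (tipc_msg_make_bundle); otherwise it is appended to backlogq
 * as-is and sent later by tipc_link_push_packets() once acks open the
 * window. Messages absorbed into an existing bundle consume no sequence
 * number of their own, which is why those branches skip seqno++.
 */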
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return tipc_link_xmit(net, &head, dnode, selector);
}
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dsz: amount of user data to be sent
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
	}
	if (link)
		return rc;

	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	__skb_queue_purge(list);
	return rc;
}
/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}
/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
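/* tipc_link_sync_xmit() and tipc_link_sync_rcv() form a pair: the first
 * unicasts our current broadcast-link state to a newly attached peer, the
 * second installs the received number as both last_sent and last_in, so
 * that both ends agree on where broadcast reception and acking start.
 */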
/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	unsigned int ack = mod(link->next_in_no - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
}
void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
{
	u32 seq_no;

	if (skb_queue_empty(&link->deferdq))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferdq));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferdq, list);
}
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;

		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				skb = NULL;
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			skb = NULL;
			goto unlock;
		}
		l_ptr->next_in_no++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(skb);
	}
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
			node->inputq = &link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = &link->namedq;
		skb_queue_tail(&link->namedq, skb);
		if (skb_queue_len(&link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case CHANGEOVER_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (!tipc_link_tunnel_rcv(node, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	}
}
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}
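/* Example for tipc_link_defer_pkt() (assumed sequence numbers): with the
 * deferred queue holding 5, 6 and 9, packet 8 is inserted before 9 and
 * the function returns 1; a second copy of 8 would be freed and return 0.
 * The queue thus stays sorted by sequence number, with less()/more()
 * handling wrap-around of the sequence space.
 */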
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (skb_queue_len(&l_ptr->backlogq))
			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
		msg_set_next_sent(msg, next_sent);
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}
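/* Protocol messages are stamped with a sequence number half the 16-bit
 * space ahead of next_out_no (mod(next_out_no + 0xffff/2)), deliberately
 * out of sequence: the receiver handles them in the LINK_PROTOCOL path
 * and never feeds them into the in-order data stream, so they can always
 * be sent, even when the data window is closed.
 */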
/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->next_in_no - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	queue = &link->backlogq;
	goto tunnel_queue;
}
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *link,
			      struct sk_buff *skb)
{
	struct sk_buff *iskb;
	int pos = 0;

	if (!tipc_link_is_up(link))
		return;

	if (!tipc_msg_extract(skb, &iskb, &pos)) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}
	/* Append buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(link, iskb);
}
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;
	int pos = 0;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		if (!tipc_msg_extract(t_buf, &buf, &pos)) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
		tipc_link_delete(l_ptr);
	return buf;
}
/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 * via other link as result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
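/* Worked example (assumed tolerance): for tol = 1500 ms, intv =
 * min(1500 / 4, 500) = 375, so the link timer fires roughly every 375 ms
 * and abort_limit becomes 1500 / (375 / 4) = about 16 unanswered probes
 * before the link is declared failed (the exact value depends on HZ
 * rounding in msecs_to_jiffies/jiffies_to_msecs). Out-of-range tolerance
 * values are silently ignored.
 */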
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);

	l->window = win;
	l->queue_limit[TIPC_LOW_IMPORTANCE]      = win / 2;
	l->queue_limit[TIPC_MEDIUM_IMPORTANCE]   = win;
	l->queue_limit[TIPC_HIGH_IMPORTANCE]     = win / 2 * 3;
	l->queue_limit[TIPC_CRITICAL_IMPORTANCE] = win * 2;
	l->queue_limit[TIPC_SYSTEM_IMPORTANCE]   = max_bulk;
}
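/* Example limits (assumed window): win = 50 gives backlog limits of 25
 * (low), 50 (medium), 75 (high) and 100 (critical) packets, so more
 * important traffic may queue longer before senders see -ELINKCONG.
 * System-importance traffic is capped by max_bulk instead, sized from
 * TIPC_MAX_PUBLICATIONS and the link MTU so that a full name-table
 * distribution can always be queued.
 */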
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(net, msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	unsigned int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}