1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "name_distr.h"
41 #include "discover.h"
42 #include "config.h"
43
44
45 /*
46  * Out-of-range value for link session numbers
47  */
48
49 #define INVALID_SESSION 0x10000
50
51 /*
52  * Link state events:
53  */
54
55 #define  STARTING_EVT    856384768      /* link processing trigger */
56 #define  TRAFFIC_MSG_EVT 560815u        /* rx'd traffic msg */
57 #define  TIMEOUT_EVT     560817u        /* link timer expired */
58
59 /*
60  * The following two 'message types' are really just implementation
61  * data conveniently stored in the message header.
62  * They must not be considered part of the protocol.
63  */
64 #define OPEN_MSG   0
65 #define CLOSED_MSG 1
66
67 /*
68  * State value stored in 'exp_msg_count'
69  */
70
71 #define START_CHANGEOVER 100000u
72
73 /**
74  * struct tipc_link_name - deconstructed link name
75  * @addr_local: network address of node at this end
76  * @if_local: name of interface at this end
77  * @addr_peer: network address of node at far end
78  * @if_peer: name of interface at far end
79  */
80
81 struct tipc_link_name {
82         u32 addr_local;
83         char if_local[TIPC_MAX_IF_NAME];
84         u32 addr_peer;
85         char if_peer[TIPC_MAX_IF_NAME];
86 };
87
88 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
89                                        struct sk_buff *buf);
90 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
91 static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
92                                      struct sk_buff **buf);
93 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
94 static int  link_send_sections_long(struct tipc_port *sender,
95                                     struct iovec const *msg_sect,
96                                     u32 num_sect, unsigned int total_len,
97                                     u32 destnode);
98 static void link_check_defragm_bufs(struct tipc_link *l_ptr);
99 static void link_state_event(struct tipc_link *l_ptr, u32 event);
100 static void link_reset_statistics(struct tipc_link *l_ptr);
101 static void link_print(struct tipc_link *l_ptr, const char *str);
102 static void link_start(struct tipc_link *l_ptr);
103 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
104
105 /*
106  *  Simple link routines
107  */
108
109 static unsigned int align(unsigned int i)
110 {
111         return (i + 3) & ~3u;
112 }
113
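/**
 * link_init_max_pkt - set link MTU values from the bearer MTU
 * @l_ptr: pointer to link
 *
 * The negotiation target is the bearer MTU rounded down to a multiple of 4
 * and capped at MAX_MSG_SIZE; the working value starts no higher than
 * MAX_PKT_DEFAULT until MTU probing can raise it.
 */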
114 static void link_init_max_pkt(struct tipc_link *l_ptr)
115 {
116         u32 max_pkt;
117
118         max_pkt = (l_ptr->b_ptr->mtu & ~3);
119         if (max_pkt > MAX_MSG_SIZE)
120                 max_pkt = MAX_MSG_SIZE;
121
122         l_ptr->max_pkt_target = max_pkt;
123         if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
124                 l_ptr->max_pkt = l_ptr->max_pkt_target;
125         else
126                 l_ptr->max_pkt = MAX_PKT_DEFAULT;
127
128         l_ptr->max_pkt_probes = 0;
129 }
130
131 static u32 link_next_sent(struct tipc_link *l_ptr)
132 {
133         if (l_ptr->next_out)
134                 return buf_seqno(l_ptr->next_out);
135         return mod(l_ptr->next_out_no);
136 }
137
138 static u32 link_last_sent(struct tipc_link *l_ptr)
139 {
140         return mod(link_next_sent(l_ptr) - 1);
141 }
142
143 /*
144  *  Simple non-static link routines (i.e. referenced outside this file)
145  */
146
147 int tipc_link_is_up(struct tipc_link *l_ptr)
148 {
149         if (!l_ptr)
150                 return 0;
151         return link_working_working(l_ptr) || link_working_unknown(l_ptr);
152 }
153
154 int tipc_link_is_active(struct tipc_link *l_ptr)
155 {
156         return  (l_ptr->owner->active_links[0] == l_ptr) ||
157                 (l_ptr->owner->active_links[1] == l_ptr);
158 }
159
160 /**
161  * link_name_validate - validate & (optionally) deconstruct tipc_link name
162  * @name: ptr to link name string
163  * @name_parts: ptr to area for link name components (or NULL if not needed)
164  *
165  * Returns 1 if link name is valid, otherwise 0.
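 *
 * A link name has the form "Z.C.N:interface-Z.C.N:interface",
 * e.g. "1.1.1:eth0-1.1.2:eth0" (addresses and interface names shown
 * here are illustrative only).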
166  */
167
168 static int link_name_validate(const char *name,
169                                 struct tipc_link_name *name_parts)
170 {
171         char name_copy[TIPC_MAX_LINK_NAME];
172         char *addr_local;
173         char *if_local;
174         char *addr_peer;
175         char *if_peer;
176         char dummy;
177         u32 z_local, c_local, n_local;
178         u32 z_peer, c_peer, n_peer;
179         u32 if_local_len;
180         u32 if_peer_len;
181
182         /* copy link name & ensure length is OK */
183
184         name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
185         /* need above in case non-POSIX strncpy() doesn't pad with nulls */
186         strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
187         if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
188                 return 0;
189
190         /* ensure all component parts of link name are present */
191
192         addr_local = name_copy;
193         if_local = strchr(addr_local, ':');
194         if (if_local == NULL)
195                 return 0;
196         *(if_local++) = 0;
197         addr_peer = strchr(if_local, '-');
198         if (addr_peer == NULL)
199                 return 0;
200         *(addr_peer++) = 0;
201         if_local_len = addr_peer - if_local;
202         if_peer = strchr(addr_peer, ':');
203         if (if_peer == NULL)
204                 return 0;
205         *(if_peer++) = 0;
206         if_peer_len = strlen(if_peer) + 1;
207
208         /* validate component parts of link name */
209
210         if ((sscanf(addr_local, "%u.%u.%u%c",
211                     &z_local, &c_local, &n_local, &dummy) != 3) ||
212             (sscanf(addr_peer, "%u.%u.%u%c",
213                     &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
214             (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
215             (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
216             (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
217             (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) ||
218             (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
219             (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
220                 return 0;
221
222         /* return link name components, if necessary */
223
224         if (name_parts) {
225                 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
226                 strcpy(name_parts->if_local, if_local);
227                 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
228                 strcpy(name_parts->if_peer, if_peer);
229         }
230         return 1;
231 }
232
233 /**
234  * link_timeout - handle expiration of link timer
235  * @l_ptr: pointer to link
236  *
237  * This routine must not grab "tipc_net_lock" to avoid a potential deadlock
238  * with tipc_link_delete().  (There is no risk that the node will be deleted by
239  * another thread because tipc_link_delete() always cancels the link timer before
240  * tipc_node_delete() is called.)
241  */
242
243 static void link_timeout(struct tipc_link *l_ptr)
244 {
245         tipc_node_lock(l_ptr->owner);
246
247         /* update counters used in statistical profiling of send traffic */
248
249         l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
250         l_ptr->stats.queue_sz_counts++;
251
252         if (l_ptr->first_out) {
253                 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
254                 u32 length = msg_size(msg);
255
256                 if ((msg_user(msg) == MSG_FRAGMENTER) &&
257                     (msg_type(msg) == FIRST_FRAGMENT)) {
258                         length = msg_size(msg_get_wrapped(msg));
259                 }
260                 if (length) {
261                         l_ptr->stats.msg_lengths_total += length;
262                         l_ptr->stats.msg_length_counts++;
263                         if (length <= 64)
264                                 l_ptr->stats.msg_length_profile[0]++;
265                         else if (length <= 256)
266                                 l_ptr->stats.msg_length_profile[1]++;
267                         else if (length <= 1024)
268                                 l_ptr->stats.msg_length_profile[2]++;
269                         else if (length <= 4096)
270                                 l_ptr->stats.msg_length_profile[3]++;
271                         else if (length <= 16384)
272                                 l_ptr->stats.msg_length_profile[4]++;
273                         else if (length <= 32768)
274                                 l_ptr->stats.msg_length_profile[5]++;
275                         else
276                                 l_ptr->stats.msg_length_profile[6]++;
277                 }
278         }
279
280         /* do all other link processing performed on a periodic basis */
281
282         link_check_defragm_bufs(l_ptr);
283
284         link_state_event(l_ptr, TIMEOUT_EVT);
285
286         if (l_ptr->next_out)
287                 tipc_link_push_queue(l_ptr);
288
289         tipc_node_unlock(l_ptr->owner);
290 }
291
292 static void link_set_timer(struct tipc_link *l_ptr, u32 time)
293 {
294         k_start_timer(&l_ptr->timer, time);
295 }
296
297 /**
298  * tipc_link_create - create a new link
299  * @n_ptr: pointer to associated node
300  * @b_ptr: pointer to associated bearer
301  * @media_addr: media address to use when sending messages over link
302  *
303  * Returns pointer to link.
304  */
305
306 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
307                               struct tipc_bearer *b_ptr,
308                               const struct tipc_media_addr *media_addr)
309 {
310         struct tipc_link *l_ptr;
311         struct tipc_msg *msg;
312         char *if_name;
313         char addr_string[16];
314         u32 peer = n_ptr->addr;
315
316         if (n_ptr->link_cnt >= 2) {
317                 tipc_addr_string_fill(addr_string, n_ptr->addr);
318                 err("Attempt to establish third link to %s\n", addr_string);
319                 return NULL;
320         }
321
322         if (n_ptr->links[b_ptr->identity]) {
323                 tipc_addr_string_fill(addr_string, n_ptr->addr);
324                 err("Attempt to establish second link on <%s> to %s\n",
325                     b_ptr->name, addr_string);
326                 return NULL;
327         }
328
329         l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
330         if (!l_ptr) {
331                 warn("Link creation failed, no memory\n");
332                 return NULL;
333         }
334
335         l_ptr->addr = peer;
336         if_name = strchr(b_ptr->name, ':') + 1;
337         sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
338                 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
339                 tipc_node(tipc_own_addr),
340                 if_name,
341                 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
342                 /* note: peer i/f name is updated by reset/activate message */
343         memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
344         l_ptr->owner = n_ptr;
345         l_ptr->checkpoint = 1;
346         l_ptr->peer_session = INVALID_SESSION;
347         l_ptr->b_ptr = b_ptr;
348         link_set_supervision_props(l_ptr, b_ptr->tolerance);
349         l_ptr->state = RESET_UNKNOWN;
350
351         l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
352         msg = l_ptr->pmsg;
353         tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
354         msg_set_size(msg, sizeof(l_ptr->proto_msg));
355         msg_set_session(msg, (tipc_random & 0xffff));
356         msg_set_bearer_id(msg, b_ptr->identity);
357         strcpy((char *)msg_data(msg), if_name);
358
359         l_ptr->priority = b_ptr->priority;
360         tipc_link_set_queue_limits(l_ptr, b_ptr->window);
361
362         link_init_max_pkt(l_ptr);
363
364         l_ptr->next_out_no = 1;
365         INIT_LIST_HEAD(&l_ptr->waiting_ports);
366
367         link_reset_statistics(l_ptr);
368
369         tipc_node_attach_link(n_ptr, l_ptr);
370
371         k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
372         list_add_tail(&l_ptr->link_list, &b_ptr->links);
373         tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
374
375         return l_ptr;
376 }
377
378 /**
379  * tipc_link_delete - delete a link
380  * @l_ptr: pointer to link
381  *
382  * Note: 'tipc_net_lock' is write_locked, bearer is locked.
383  * This routine must not grab the node lock until after link timer cancellation
384  * to avoid a potential deadlock situation.
385  */
386
387 void tipc_link_delete(struct tipc_link *l_ptr)
388 {
389         if (!l_ptr) {
390                 err("Attempt to delete non-existent link\n");
391                 return;
392         }
393
394         k_cancel_timer(&l_ptr->timer);
395
396         tipc_node_lock(l_ptr->owner);
397         tipc_link_reset(l_ptr);
398         tipc_node_detach_link(l_ptr->owner, l_ptr);
399         tipc_link_stop(l_ptr);
400         list_del_init(&l_ptr->link_list);
401         tipc_node_unlock(l_ptr->owner);
402         k_term_timer(&l_ptr->timer);
403         kfree(l_ptr);
404 }
405
406 static void link_start(struct tipc_link *l_ptr)
407 {
408         tipc_node_lock(l_ptr->owner);
409         link_state_event(l_ptr, STARTING_EVT);
410         tipc_node_unlock(l_ptr->owner);
411 }
412
413 /**
414  * link_schedule_port - schedule port for deferred sending
415  * @l_ptr: pointer to link
416  * @origport: reference to sending port
417  * @sz: amount of data to be sent
418  *
419  * Schedules port for renewed sending of messages after link congestion
420  * has abated.
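 *
 * Returns -ELINKCONG in all cases, so callers can propagate the
 * congestion indication to their own callers.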
421  */
422
423 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
424 {
425         struct tipc_port *p_ptr;
426
427         spin_lock_bh(&tipc_port_list_lock);
428         p_ptr = tipc_port_lock(origport);
429         if (p_ptr) {
430                 if (!p_ptr->wakeup)
431                         goto exit;
432                 if (!list_empty(&p_ptr->wait_list))
433                         goto exit;
434                 p_ptr->congested = 1;
435                 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
436                 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
437                 l_ptr->stats.link_congs++;
438 exit:
439                 tipc_port_unlock(p_ptr);
440         }
441         spin_unlock_bh(&tipc_port_list_lock);
442         return -ELINKCONG;
443 }
444
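/**
 * tipc_link_wakeup_ports - wake up ports waiting on a congested link
 * @l_ptr: pointer to link
 * @all: non-zero to wake all waiting ports regardless of remaining send window
 */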
445 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
446 {
447         struct tipc_port *p_ptr;
448         struct tipc_port *temp_p_ptr;
449         int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
450
451         if (all)
452                 win = 100000;
453         if (win <= 0)
454                 return;
455         if (!spin_trylock_bh(&tipc_port_list_lock))
456                 return;
457         if (link_congested(l_ptr))
458                 goto exit;
459         list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
460                                  wait_list) {
461                 if (win <= 0)
462                         break;
463                 list_del_init(&p_ptr->wait_list);
464                 spin_lock_bh(p_ptr->lock);
465                 p_ptr->congested = 0;
466                 p_ptr->wakeup(p_ptr);
467                 win -= p_ptr->waiting_pkts;
468                 spin_unlock_bh(p_ptr->lock);
469         }
470
471 exit:
472         spin_unlock_bh(&tipc_port_list_lock);
473 }
474
475 /**
476  * link_release_outqueue - purge link's outbound message queue
477  * @l_ptr: pointer to link
478  */
479
480 static void link_release_outqueue(struct tipc_link *l_ptr)
481 {
482         struct sk_buff *buf = l_ptr->first_out;
483         struct sk_buff *next;
484
485         while (buf) {
486                 next = buf->next;
487                 kfree_skb(buf);
488                 buf = next;
489         }
490         l_ptr->first_out = NULL;
491         l_ptr->out_queue_size = 0;
492 }
493
494 /**
495  * tipc_link_reset_fragments - purge link's inbound message fragments queue
496  * @l_ptr: pointer to link
497  */
498
499 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
500 {
501         struct sk_buff *buf = l_ptr->defragm_buf;
502         struct sk_buff *next;
503
504         while (buf) {
505                 next = buf->next;
506                 kfree_skb(buf);
507                 buf = next;
508         }
509         l_ptr->defragm_buf = NULL;
510 }
511
512 /**
513  * tipc_link_stop - purge all inbound and outbound messages associated with link
514  * @l_ptr: pointer to link
515  */
516
517 void tipc_link_stop(struct tipc_link *l_ptr)
518 {
519         struct sk_buff *buf;
520         struct sk_buff *next;
521
522         buf = l_ptr->oldest_deferred_in;
523         while (buf) {
524                 next = buf->next;
525                 kfree_skb(buf);
526                 buf = next;
527         }
528
529         buf = l_ptr->first_out;
530         while (buf) {
531                 next = buf->next;
532                 kfree_skb(buf);
533                 buf = next;
534         }
535
536         tipc_link_reset_fragments(l_ptr);
537
538         kfree_skb(l_ptr->proto_msg_queue);
539         l_ptr->proto_msg_queue = NULL;
540 }
541
542 void tipc_link_reset(struct tipc_link *l_ptr)
543 {
544         struct sk_buff *buf;
545         u32 prev_state = l_ptr->state;
546         u32 checkpoint = l_ptr->next_in_no;
547         int was_active_link = tipc_link_is_active(l_ptr);
548
549         msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
550
551         /* Link is down, accept any session */
552         l_ptr->peer_session = INVALID_SESSION;
553
554         /* Prepare for max packet size negotiation */
555         link_init_max_pkt(l_ptr);
556
557         l_ptr->state = RESET_UNKNOWN;
558
559         if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
560                 return;
561
562         tipc_node_link_down(l_ptr->owner, l_ptr);
563         tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
564
565         if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
566             l_ptr->owner->permit_changeover) {
567                 l_ptr->reset_checkpoint = checkpoint;
568                 l_ptr->exp_msg_count = START_CHANGEOVER;
569         }
570
571         /* Clean up all queues: */
572
573         link_release_outqueue(l_ptr);
574         kfree_skb(l_ptr->proto_msg_queue);
575         l_ptr->proto_msg_queue = NULL;
576         buf = l_ptr->oldest_deferred_in;
577         while (buf) {
578                 struct sk_buff *next = buf->next;
579                 kfree_skb(buf);
580                 buf = next;
581         }
582         if (!list_empty(&l_ptr->waiting_ports))
583                 tipc_link_wakeup_ports(l_ptr, 1);
584
585         l_ptr->retransm_queue_head = 0;
586         l_ptr->retransm_queue_size = 0;
587         l_ptr->last_out = NULL;
588         l_ptr->first_out = NULL;
589         l_ptr->next_out = NULL;
590         l_ptr->unacked_window = 0;
591         l_ptr->checkpoint = 1;
592         l_ptr->next_out_no = 1;
593         l_ptr->deferred_inqueue_sz = 0;
594         l_ptr->oldest_deferred_in = NULL;
595         l_ptr->newest_deferred_in = NULL;
596         l_ptr->fsm_msg_cnt = 0;
597         l_ptr->stale_count = 0;
598         link_reset_statistics(l_ptr);
599 }
600
601
602 static void link_activate(struct tipc_link *l_ptr)
603 {
604         l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
605         tipc_node_link_up(l_ptr->owner, l_ptr);
606         tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
607 }
608
609 /**
610  * link_state_event - link finite state machine
611  * @l_ptr: pointer to link
612  * @event: state machine event to process
613  */
614
615 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
616 {
617         struct tipc_link *other;
618         u32 cont_intv = l_ptr->continuity_interval;
619
620         if (!l_ptr->started && (event != STARTING_EVT))
621                 return;         /* Not yet. */
622
623         if (link_blocked(l_ptr)) {
624                 if (event == TIMEOUT_EVT)
625                         link_set_timer(l_ptr, cont_intv);
626                 return;   /* Changeover going on */
627         }
628
629         switch (l_ptr->state) {
630         case WORKING_WORKING:
631                 switch (event) {
632                 case TRAFFIC_MSG_EVT:
633                 case ACTIVATE_MSG:
634                         break;
635                 case TIMEOUT_EVT:
636                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
637                                 l_ptr->checkpoint = l_ptr->next_in_no;
638                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
639                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
640                                                                  0, 0, 0, 0, 0);
641                                         l_ptr->fsm_msg_cnt++;
642                                 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
643                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
644                                                                  1, 0, 0, 0, 0);
645                                         l_ptr->fsm_msg_cnt++;
646                                 }
647                                 link_set_timer(l_ptr, cont_intv);
648                                 break;
649                         }
650                         l_ptr->state = WORKING_UNKNOWN;
651                         l_ptr->fsm_msg_cnt = 0;
652                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
653                         l_ptr->fsm_msg_cnt++;
654                         link_set_timer(l_ptr, cont_intv / 4);
655                         break;
656                 case RESET_MSG:
657                         info("Resetting link <%s>, requested by peer\n",
658                              l_ptr->name);
659                         tipc_link_reset(l_ptr);
660                         l_ptr->state = RESET_RESET;
661                         l_ptr->fsm_msg_cnt = 0;
662                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
663                         l_ptr->fsm_msg_cnt++;
664                         link_set_timer(l_ptr, cont_intv);
665                         break;
666                 default:
667                         err("Unknown link event %u in WW state\n", event);
668                 }
669                 break;
670         case WORKING_UNKNOWN:
671                 switch (event) {
672                 case TRAFFIC_MSG_EVT:
673                 case ACTIVATE_MSG:
674                         l_ptr->state = WORKING_WORKING;
675                         l_ptr->fsm_msg_cnt = 0;
676                         link_set_timer(l_ptr, cont_intv);
677                         break;
678                 case RESET_MSG:
679                         info("Resetting link <%s>, requested by peer "
680                              "while probing\n", l_ptr->name);
681                         tipc_link_reset(l_ptr);
682                         l_ptr->state = RESET_RESET;
683                         l_ptr->fsm_msg_cnt = 0;
684                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
685                         l_ptr->fsm_msg_cnt++;
686                         link_set_timer(l_ptr, cont_intv);
687                         break;
688                 case TIMEOUT_EVT:
689                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
690                                 l_ptr->state = WORKING_WORKING;
691                                 l_ptr->fsm_msg_cnt = 0;
692                                 l_ptr->checkpoint = l_ptr->next_in_no;
693                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
694                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
695                                                                  0, 0, 0, 0, 0);
696                                         l_ptr->fsm_msg_cnt++;
697                                 }
698                                 link_set_timer(l_ptr, cont_intv);
699                         } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
700                                 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
701                                                          1, 0, 0, 0, 0);
702                                 l_ptr->fsm_msg_cnt++;
703                                 link_set_timer(l_ptr, cont_intv / 4);
704                         } else {        /* Link has failed */
705                                 warn("Resetting link <%s>, peer not responding\n",
706                                      l_ptr->name);
707                                 tipc_link_reset(l_ptr);
708                                 l_ptr->state = RESET_UNKNOWN;
709                                 l_ptr->fsm_msg_cnt = 0;
710                                 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
711                                                          0, 0, 0, 0, 0);
712                                 l_ptr->fsm_msg_cnt++;
713                                 link_set_timer(l_ptr, cont_intv);
714                         }
715                         break;
716                 default:
717                         err("Unknown link event %u in WU state\n", event);
718                 }
719                 break;
720         case RESET_UNKNOWN:
721                 switch (event) {
722                 case TRAFFIC_MSG_EVT:
723                         break;
724                 case ACTIVATE_MSG:
725                         other = l_ptr->owner->active_links[0];
726                         if (other && link_working_unknown(other))
727                                 break;
728                         l_ptr->state = WORKING_WORKING;
729                         l_ptr->fsm_msg_cnt = 0;
730                         link_activate(l_ptr);
731                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
732                         l_ptr->fsm_msg_cnt++;
733                         link_set_timer(l_ptr, cont_intv);
734                         break;
735                 case RESET_MSG:
736                         l_ptr->state = RESET_RESET;
737                         l_ptr->fsm_msg_cnt = 0;
738                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
739                         l_ptr->fsm_msg_cnt++;
740                         link_set_timer(l_ptr, cont_intv);
741                         break;
742                 case STARTING_EVT:
743                         l_ptr->started = 1;
744                         /* fall through */
745                 case TIMEOUT_EVT:
746                         tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
747                         l_ptr->fsm_msg_cnt++;
748                         link_set_timer(l_ptr, cont_intv);
749                         break;
750                 default:
751                         err("Unknown link event %u in RU state\n", event);
752                 }
753                 break;
754         case RESET_RESET:
755                 switch (event) {
756                 case TRAFFIC_MSG_EVT:
757                 case ACTIVATE_MSG:
758                         other = l_ptr->owner->active_links[0];
759                         if (other && link_working_unknown(other))
760                                 break;
761                         l_ptr->state = WORKING_WORKING;
762                         l_ptr->fsm_msg_cnt = 0;
763                         link_activate(l_ptr);
764                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
765                         l_ptr->fsm_msg_cnt++;
766                         link_set_timer(l_ptr, cont_intv);
767                         break;
768                 case RESET_MSG:
769                         break;
770                 case TIMEOUT_EVT:
771                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
772                         l_ptr->fsm_msg_cnt++;
773                         link_set_timer(l_ptr, cont_intv);
774                         break;
775                 default:
776                         err("Unknown link event %u in RR state\n", event);
777                 }
778                 break;
779         default:
780                 err("Unknown link state %u/%u\n", l_ptr->state, event);
781         }
782 }
783
784 /*
785  * link_bundle_buf(): Append contents of a buffer to
786  * the tail of an existing one.
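 * The appended message is padded to a 4-byte boundary inside the bundle.
 * Returns 1 if the buffer was bundled (and freed), otherwise 0.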
787  */
788
789 static int link_bundle_buf(struct tipc_link *l_ptr,
790                            struct sk_buff *bundler,
791                            struct sk_buff *buf)
792 {
793         struct tipc_msg *bundler_msg = buf_msg(bundler);
794         struct tipc_msg *msg = buf_msg(buf);
795         u32 size = msg_size(msg);
796         u32 bundle_size = msg_size(bundler_msg);
797         u32 to_pos = align(bundle_size);
798         u32 pad = to_pos - bundle_size;
799
800         if (msg_user(bundler_msg) != MSG_BUNDLER)
801                 return 0;
802         if (msg_type(bundler_msg) != OPEN_MSG)
803                 return 0;
804         if (skb_tailroom(bundler) < (pad + size))
805                 return 0;
806         if (l_ptr->max_pkt < (to_pos + size))
807                 return 0;
808
809         skb_put(bundler, pad + size);
810         skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
811         msg_set_size(bundler_msg, to_pos + size);
812         msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
813         kfree_skb(buf);
814         l_ptr->stats.sent_bundled++;
815         return 1;
816 }
817
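/*
 * link_add_to_outqueue - stamp a message with acknowledge/sequence numbers
 * and append its buffer to the link's outbound queue
 */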
818 static void link_add_to_outqueue(struct tipc_link *l_ptr,
819                                  struct sk_buff *buf,
820                                  struct tipc_msg *msg)
821 {
822         u32 ack = mod(l_ptr->next_in_no - 1);
823         u32 seqno = mod(l_ptr->next_out_no++);
824
825         msg_set_word(msg, 2, ((ack << 16) | seqno));
826         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
827         buf->next = NULL;
828         if (l_ptr->first_out) {
829                 l_ptr->last_out->next = buf;
830                 l_ptr->last_out = buf;
831         } else
832                 l_ptr->first_out = l_ptr->last_out = buf;
833
834         l_ptr->out_queue_size++;
835         if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
836                 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
837 }
838
839 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
840                                        struct sk_buff *buf_chain,
841                                        u32 long_msgno)
842 {
843         struct sk_buff *buf;
844         struct tipc_msg *msg;
845
846         if (!l_ptr->next_out)
847                 l_ptr->next_out = buf_chain;
848         while (buf_chain) {
849                 buf = buf_chain;
850                 buf_chain = buf_chain->next;
851
852                 msg = buf_msg(buf);
853                 msg_set_long_msgno(msg, long_msgno);
854                 link_add_to_outqueue(l_ptr, buf, msg);
855         }
856 }
857
858 /*
859  * tipc_link_send_buf() is the 'full path' for messages, called from
860  * inside TIPC when the 'fast path' in tipc_send_buf
861  * has failed, and from link_send()
862  */
863
864 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
865 {
866         struct tipc_msg *msg = buf_msg(buf);
867         u32 size = msg_size(msg);
868         u32 dsz = msg_data_sz(msg);
869         u32 queue_size = l_ptr->out_queue_size;
870         u32 imp = tipc_msg_tot_importance(msg);
871         u32 queue_limit = l_ptr->queue_limit[imp];
872         u32 max_packet = l_ptr->max_pkt;
873
874         /* Match msg importance against queue limits: */
875
876         if (unlikely(queue_size >= queue_limit)) {
877                 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
878                         link_schedule_port(l_ptr, msg_origport(msg), size);
879                         kfree_skb(buf);
880                         return -ELINKCONG;
881                 }
882                 kfree_skb(buf);
883                 if (imp > CONN_MANAGER) {
884                         warn("Resetting link <%s>, send queue full\n", l_ptr->name);
885                         tipc_link_reset(l_ptr);
886                 }
887                 return dsz;
888         }
889
890         /* Fragmentation needed ? */
891
892         if (size > max_packet)
893                 return link_send_long_buf(l_ptr, buf);
894
895         /* Packet can be queued or sent: */
896
897         if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
898                    !link_congested(l_ptr))) {
899                 link_add_to_outqueue(l_ptr, buf, msg);
900
901                 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
902                         l_ptr->unacked_window = 0;
903                 } else {
904                         tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
905                         l_ptr->stats.bearer_congs++;
906                         l_ptr->next_out = buf;
907                 }
908                 return dsz;
909         }
910         /* Congestion: can message be bundled ?: */
911
912         if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
913             (msg_user(msg) != MSG_FRAGMENTER)) {
914
915                 /* Try adding message to an existing bundle */
916
917                 if (l_ptr->next_out &&
918                     link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
919                         tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
920                         return dsz;
921                 }
922
923                 /* Try creating a new bundle */
924
925                 if (size <= max_packet * 2 / 3) {
926                         struct sk_buff *bundler = tipc_buf_acquire(max_packet);
927                         struct tipc_msg bundler_hdr;
928
929                         if (bundler) {
930                                 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
931                                          INT_H_SIZE, l_ptr->addr);
932                                 skb_copy_to_linear_data(bundler, &bundler_hdr,
933                                                         INT_H_SIZE);
934                                 skb_trim(bundler, INT_H_SIZE);
935                                 link_bundle_buf(l_ptr, bundler, buf);
936                                 buf = bundler;
937                                 msg = buf_msg(buf);
938                                 l_ptr->stats.sent_bundles++;
939                         }
940                 }
941         }
942         if (!l_ptr->next_out)
943                 l_ptr->next_out = buf;
944         link_add_to_outqueue(l_ptr, buf, msg);
945         tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
946         return dsz;
947 }
948
949 /*
950  * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
951  * not been selected yet, and the owner node is not locked.
952  * Called by TIPC internal users, e.g. the name distributor
953  */
954
955 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
956 {
957         struct tipc_link *l_ptr;
958         struct tipc_node *n_ptr;
959         int res = -ELINKCONG;
960
961         read_lock_bh(&tipc_net_lock);
962         n_ptr = tipc_node_find(dest);
963         if (n_ptr) {
964                 tipc_node_lock(n_ptr);
965                 l_ptr = n_ptr->active_links[selector & 1];
966                 if (l_ptr)
967                         res = tipc_link_send_buf(l_ptr, buf);
968                 else
969                         kfree_skb(buf);
970                 tipc_node_unlock(n_ptr);
971         } else {
972                 kfree_skb(buf);
973         }
974         read_unlock_bh(&tipc_net_lock);
975         return res;
976 }
977
978 /*
979  * tipc_link_send_names - send name table entries to new neighbor
980  *
981  * Send routine for bulk delivery of name table messages when contact
982  * with a new neighbor occurs. No link congestion checking is performed
983  * because name table messages *must* be delivered. The messages must be
984  * small enough not to require fragmentation.
985  * Called without any locks held.
986  */
987
988 void tipc_link_send_names(struct list_head *message_list, u32 dest)
989 {
990         struct tipc_node *n_ptr;
991         struct tipc_link *l_ptr;
992         struct sk_buff *buf;
993         struct sk_buff *temp_buf;
994
995         if (list_empty(message_list))
996                 return;
997
998         read_lock_bh(&tipc_net_lock);
999         n_ptr = tipc_node_find(dest);
1000         if (n_ptr) {
1001                 tipc_node_lock(n_ptr);
1002                 l_ptr = n_ptr->active_links[0];
1003                 if (l_ptr) {
1004                         /* convert circular list to linear list */
1005                         ((struct sk_buff *)message_list->prev)->next = NULL;
1006                         link_add_chain_to_outqueue(l_ptr,
1007                                 (struct sk_buff *)message_list->next, 0);
1008                         tipc_link_push_queue(l_ptr);
1009                         INIT_LIST_HEAD(message_list);
1010                 }
1011                 tipc_node_unlock(n_ptr);
1012         }
1013         read_unlock_bh(&tipc_net_lock);
1014
1015         /* discard the messages if they couldn't be sent */
1016
1017         list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1018                 list_del((struct list_head *)buf);
1019                 kfree_skb(buf);
1020         }
1021 }
1022
1023 /*
1024  * link_send_buf_fast: Entry for data messages where the
1025  * destination link is known and the header is complete,
1026  * including the total message length. Very time critical.
1027  * Link is locked. Returns user data length.
1028  */
1029
1030 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
1031                               u32 *used_max_pkt)
1032 {
1033         struct tipc_msg *msg = buf_msg(buf);
1034         int res = msg_data_sz(msg);
1035
1036         if (likely(!link_congested(l_ptr))) {
1037                 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
1038                         if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1039                                 link_add_to_outqueue(l_ptr, buf, msg);
1040                                 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1041                                                             &l_ptr->media_addr))) {
1042                                         l_ptr->unacked_window = 0;
1043                                         return res;
1044                                 }
1045                                 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1046                                 l_ptr->stats.bearer_congs++;
1047                                 l_ptr->next_out = buf;
1048                                 return res;
1049                         }
1050                 } else
1051                         *used_max_pkt = l_ptr->max_pkt;
1052         }
1053         return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
1054 }
1055
1056 /*
1057  * tipc_send_buf_fast: Entry for data messages where the
1058  * destination node is known and the header is complete,
1059  * including the total message length.
1060  * Returns user data length.
1061  */
1062 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1063 {
1064         struct tipc_link *l_ptr;
1065         struct tipc_node *n_ptr;
1066         int res;
1067         u32 selector = msg_origport(buf_msg(buf)) & 1;
1068         u32 dummy;
1069
1070         read_lock_bh(&tipc_net_lock);
1071         n_ptr = tipc_node_find(destnode);
1072         if (likely(n_ptr)) {
1073                 tipc_node_lock(n_ptr);
1074                 l_ptr = n_ptr->active_links[selector];
1075                 if (likely(l_ptr)) {
1076                         res = link_send_buf_fast(l_ptr, buf, &dummy);
1077                         tipc_node_unlock(n_ptr);
1078                         read_unlock_bh(&tipc_net_lock);
1079                         return res;
1080                 }
1081                 tipc_node_unlock(n_ptr);
1082         }
1083         read_unlock_bh(&tipc_net_lock);
1084         res = msg_data_sz(buf_msg(buf));
1085         tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1086         return res;
1087 }
1088
1089
1090 /*
1091  * tipc_link_send_sections_fast: Entry for messages where the
1092  * destination processor is known and the header is complete,
1093  * except for total message length.
1094  * Returns user data length or errno.
1095  */
1096 int tipc_link_send_sections_fast(struct tipc_port *sender,
1097                                  struct iovec const *msg_sect,
1098                                  const u32 num_sect,
1099                                  unsigned int total_len,
1100                                  u32 destaddr)
1101 {
1102         struct tipc_msg *hdr = &sender->phdr;
1103         struct tipc_link *l_ptr;
1104         struct sk_buff *buf;
1105         struct tipc_node *node;
1106         int res;
1107         u32 selector = msg_origport(hdr) & 1;
1108
1109 again:
1110         /*
1111          * Try building message using port's max_pkt hint.
1112          * (Must not hold any locks while building message.)
1113          */
1114
1115         res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1116                              sender->max_pkt, !sender->user_port, &buf);
1117
1118         read_lock_bh(&tipc_net_lock);
1119         node = tipc_node_find(destaddr);
1120         if (likely(node)) {
1121                 tipc_node_lock(node);
1122                 l_ptr = node->active_links[selector];
1123                 if (likely(l_ptr)) {
1124                         if (likely(buf)) {
1125                                 res = link_send_buf_fast(l_ptr, buf,
1126                                                          &sender->max_pkt);
1127 exit:
1128                                 tipc_node_unlock(node);
1129                                 read_unlock_bh(&tipc_net_lock);
1130                                 return res;
1131                         }
1132
1133                         /* Exit if build request was invalid */
1134
1135                         if (unlikely(res < 0))
1136                                 goto exit;
1137
1138                         /* Exit if link (or bearer) is congested */
1139
1140                         if (link_congested(l_ptr) ||
1141                             !list_empty(&l_ptr->b_ptr->cong_links)) {
1142                                 res = link_schedule_port(l_ptr,
1143                                                          sender->ref, res);
1144                                 goto exit;
1145                         }
1146
1147                         /*
1148                          * Message size exceeds max_pkt hint; update hint,
1149                          * then re-try fast path or fragment the message
1150                          */
1151
1152                         sender->max_pkt = l_ptr->max_pkt;
1153                         tipc_node_unlock(node);
1154                         read_unlock_bh(&tipc_net_lock);
1155
1156
1157                         if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1158                                 goto again;
1159
1160                         return link_send_sections_long(sender, msg_sect,
1161                                                        num_sect, total_len,
1162                                                        destaddr);
1163                 }
1164                 tipc_node_unlock(node);
1165         }
1166         read_unlock_bh(&tipc_net_lock);
1167
1168         /* Couldn't find a link to the destination node */
1169
1170         if (buf)
1171                 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1172         if (res >= 0)
1173                 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1174                                                  total_len, TIPC_ERR_NO_NODE);
1175         return res;
1176 }
1177
1178 /*
1179  * link_send_sections_long(): Entry for long messages where the
1180  * destination node is known and the header is complete,
1181  * including the total message length.
1182  * Link and bearer congestion status have been checked to be ok,
1183  * and are ignored if they change.
1184  *
1185  * Note that fragments do not use the full link MTU so that they won't have
1186  * to undergo refragmentation if link changeover causes them to be sent
1187  * over another link with an additional tunnel header added as prefix.
1188  * (Refragmentation will still occur if the other link has a smaller MTU.)
1189  *
1190  * Returns user data length or errno.
1191  */
1192 static int link_send_sections_long(struct tipc_port *sender,
1193                                    struct iovec const *msg_sect,
1194                                    u32 num_sect,
1195                                    unsigned int total_len,
1196                                    u32 destaddr)
1197 {
1198         struct tipc_link *l_ptr;
1199         struct tipc_node *node;
1200         struct tipc_msg *hdr = &sender->phdr;
1201         u32 dsz = total_len;
1202         u32 max_pkt, fragm_sz, rest;
1203         struct tipc_msg fragm_hdr;
1204         struct sk_buff *buf, *buf_chain, *prev;
1205         u32 fragm_crs, fragm_rest, hsz, sect_rest;
1206         const unchar *sect_crs;
1207         int curr_sect;
1208         u32 fragm_no;
1209
1210 again:
1211         fragm_no = 1;
1212         max_pkt = sender->max_pkt - INT_H_SIZE;
1213                 /* leave room for tunnel header in case of link changeover */
1214         fragm_sz = max_pkt - INT_H_SIZE;
1215                 /* leave room for fragmentation header in each fragment */
1216         rest = dsz;
1217         fragm_crs = 0;
1218         fragm_rest = 0;
1219         sect_rest = 0;
1220         sect_crs = NULL;
1221         curr_sect = -1;
1222
1223         /* Prepare reusable fragment header: */
1224
1225         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1226                  INT_H_SIZE, msg_destnode(hdr));
1227         msg_set_size(&fragm_hdr, max_pkt);
1228         msg_set_fragm_no(&fragm_hdr, 1);
1229
1230         /* Prepare header of first fragment: */
1231
1232         buf_chain = buf = tipc_buf_acquire(max_pkt);
1233         if (!buf)
1234                 return -ENOMEM;
1235         buf->next = NULL;
1236         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1237         hsz = msg_hdr_sz(hdr);
1238         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1239
1240         /* Chop up message: */
1241
1242         fragm_crs = INT_H_SIZE + hsz;
1243         fragm_rest = fragm_sz - hsz;
1244
1245         do {            /* For all sections */
1246                 u32 sz;
1247
1248                 if (!sect_rest) {
1249                         sect_rest = msg_sect[++curr_sect].iov_len;
1250                         sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1251                 }
1252
1253                 if (sect_rest < fragm_rest)
1254                         sz = sect_rest;
1255                 else
1256                         sz = fragm_rest;
1257
1258                 if (likely(!sender->user_port)) {
1259                         if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1260 error:
1261                                 for (; buf_chain; buf_chain = buf) {
1262                                         buf = buf_chain->next;
1263                                         kfree_skb(buf_chain);
1264                                 }
1265                                 return -EFAULT;
1266                         }
1267                 } else
1268                         skb_copy_to_linear_data_offset(buf, fragm_crs,
1269                                                        sect_crs, sz);
1270                 sect_crs += sz;
1271                 sect_rest -= sz;
1272                 fragm_crs += sz;
1273                 fragm_rest -= sz;
1274                 rest -= sz;
1275
1276                 if (!fragm_rest && rest) {
1277
1278                         /* Initiate new fragment: */
1279                         if (rest <= fragm_sz) {
1280                                 fragm_sz = rest;
1281                                 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1282                         } else {
1283                                 msg_set_type(&fragm_hdr, FRAGMENT);
1284                         }
1285                         msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1286                         msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1287                         prev = buf;
1288                         buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1289                         if (!buf)
1290                                 goto error;
1291
1292                         buf->next = NULL;
1293                         prev->next = buf;
1294                         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1295                         fragm_crs = INT_H_SIZE;
1296                         fragm_rest = fragm_sz;
1297                 }
1298         } while (rest > 0);
1299
1300         /*
1301          * Now we have a buffer chain. Select a link and check
1302          * that packet size is still OK
1303          */
1304         node = tipc_node_find(destaddr);
1305         if (likely(node)) {
1306                 tipc_node_lock(node);
1307                 l_ptr = node->active_links[sender->ref & 1];
1308                 if (!l_ptr) {
1309                         tipc_node_unlock(node);
1310                         goto reject;
1311                 }
1312                 if (l_ptr->max_pkt < max_pkt) {
1313                         sender->max_pkt = l_ptr->max_pkt;
1314                         tipc_node_unlock(node);
1315                         for (; buf_chain; buf_chain = buf) {
1316                                 buf = buf_chain->next;
1317                                 kfree_skb(buf_chain);
1318                         }
1319                         goto again;
1320                 }
1321         } else {
1322 reject:
1323                 for (; buf_chain; buf_chain = buf) {
1324                         buf = buf_chain->next;
1325                         kfree_skb(buf_chain);
1326                 }
1327                 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1328                                                  total_len, TIPC_ERR_NO_NODE);
1329         }
1330
1331         /* Append chain of fragments to send queue & send them */
1332
1333         l_ptr->long_msg_seq_no++;
1334         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1335         l_ptr->stats.sent_fragments += fragm_no;
1336         l_ptr->stats.sent_fragmented++;
1337         tipc_link_push_queue(l_ptr);
1338         tipc_node_unlock(node);
1339         return dsz;
1340 }
1341
1342 /*
1343  * tipc_link_push_packet: Push one unsent packet to the media
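 *
 * Returns 0 if a packet was sent, PUSH_FINISHED if there was nothing left
 * to push, or PUSH_FAILED if the bearer was congested.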
1344  */
1345 u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1346 {
1347         struct sk_buff *buf = l_ptr->first_out;
1348         u32 r_q_size = l_ptr->retransm_queue_size;
1349         u32 r_q_head = l_ptr->retransm_queue_head;
1350
1351         /* Step to position where retransmission failed, if any,    */
1352         /* consider that buffers may have been released in meantime */
1353
1354         if (r_q_size && buf) {
1355                 u32 last = lesser(mod(r_q_head + r_q_size),
1356                                   link_last_sent(l_ptr));
1357                 u32 first = buf_seqno(buf);
1358
1359                 while (buf && less(first, r_q_head)) {
1360                         first = mod(first + 1);
1361                         buf = buf->next;
1362                 }
1363                 l_ptr->retransm_queue_head = r_q_head = first;
1364                 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1365         }
1366
1367         /* Continue retransmission now, if there is anything: */
1368
1369         if (r_q_size && buf) {
1370                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1371                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1372                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1373                         l_ptr->retransm_queue_head = mod(++r_q_head);
1374                         l_ptr->retransm_queue_size = --r_q_size;
1375                         l_ptr->stats.retransmitted++;
1376                         return 0;
1377                 } else {
1378                         l_ptr->stats.bearer_congs++;
1379                         return PUSH_FAILED;
1380                 }
1381         }
1382
1383         /* Send deferred protocol message, if any: */
1384
1385         buf = l_ptr->proto_msg_queue;
1386         if (buf) {
1387                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1388                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1389                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1390                         l_ptr->unacked_window = 0;
1391                         kfree_skb(buf);
1392                         l_ptr->proto_msg_queue = NULL;
1393                         return 0;
1394                 } else {
1395                         l_ptr->stats.bearer_congs++;
1396                         return PUSH_FAILED;
1397                 }
1398         }
1399
1400         /* Send one deferred data message, if send window not full: */
1401
1402         buf = l_ptr->next_out;
1403         if (buf) {
1404                 struct tipc_msg *msg = buf_msg(buf);
1405                 u32 next = msg_seqno(msg);
1406                 u32 first = buf_seqno(l_ptr->first_out);
1407
1408                 if (mod(next - first) < l_ptr->queue_limit[0]) {
1409                         msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1410                         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1411                         if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1412                                 if (msg_user(msg) == MSG_BUNDLER)
1413                                         msg_set_type(msg, CLOSED_MSG);
1414                                 l_ptr->next_out = buf->next;
1415                                 return 0;
1416                         } else {
1417                                 l_ptr->stats.bearer_congs++;
1418                                 return PUSH_FAILED;
1419                         }
1420                 }
1421         }
1422         return PUSH_FINISHED;
1423 }
1424
1425 /*
1426  * tipc_link_push_queue(): push out the unsent messages of a link where
1427  *               congestion has abated. Node is locked.
1428  */
1429 void tipc_link_push_queue(struct tipc_link *l_ptr)
1430 {
1431         u32 res;
1432
1433         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1434                 return;
1435
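        /*
         * tipc_link_push_packet() returns 0 for each packet successfully
         * handed to the bearer, PUSH_FAILED on bearer congestion, and
         * PUSH_FINISHED once there is nothing left to send.
         */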
1436         do {
1437                 res = tipc_link_push_packet(l_ptr);
1438         } while (!res);
1439
1440         if (res == PUSH_FAILED)
1441                 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1442 }
1443
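/*
 * link_reset_all - reset all links to a node
 *
 * Invoked via tipc_k_signal() with the node's network address; the node is
 * looked up again here, since it may have disappeared in the meantime.
 */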
1444 static void link_reset_all(unsigned long addr)
1445 {
1446         struct tipc_node *n_ptr;
1447         char addr_string[16];
1448         u32 i;
1449
1450         read_lock_bh(&tipc_net_lock);
1451         n_ptr = tipc_node_find((u32)addr);
1452         if (!n_ptr) {
1453                 read_unlock_bh(&tipc_net_lock);
1454                 return; /* node no longer exists */
1455         }
1456
1457         tipc_node_lock(n_ptr);
1458
1459         warn("Resetting all links to %s\n",
1460              tipc_addr_string_fill(addr_string, n_ptr->addr));
1461
1462         for (i = 0; i < MAX_BEARERS; i++) {
1463                 if (n_ptr->links[i]) {
1464                         link_print(n_ptr->links[i], "Resetting link\n");
1465                         tipc_link_reset(n_ptr->links[i]);
1466                 }
1467         }
1468
1469         tipc_node_unlock(n_ptr);
1470         read_unlock_bh(&tipc_net_lock);
1471 }
1472
1473 static void link_retransmit_failure(struct tipc_link *l_ptr,
1474                                         struct sk_buff *buf)
1475 {
1476         struct tipc_msg *msg = buf_msg(buf);
1477
1478         warn("Retransmission failure on link <%s>\n", l_ptr->name);
1479
1480         if (l_ptr->addr) {
1481
1482                 /* Handle failure on standard link */
1483
1484                 link_print(l_ptr, "Resetting link\n");
1485                 tipc_link_reset(l_ptr);
1486
1487         } else {
1488
1489                 /* Handle failure on broadcast link */
1490
1491                 struct tipc_node *n_ptr;
1492                 char addr_string[16];
1493
1494                 info("Msg seq number: %u,  ", msg_seqno(msg));
1495                 info("Outstanding acks: %lu\n",
1496                      (unsigned long) TIPC_SKB_CB(buf)->handle);
1497
1498                 n_ptr = tipc_bclink_retransmit_to();
1499                 tipc_node_lock(n_ptr);
1500
1501                 tipc_addr_string_fill(addr_string, n_ptr->addr);
1502                 info("Broadcast link info for %s\n", addr_string);
1503                 info("Supportable: %d,  ", n_ptr->bclink.supportable);
1504                 info("Supported: %d,  ", n_ptr->bclink.supported);
1505                 info("Acked: %u\n", n_ptr->bclink.acked);
1506                 info("Last in: %u,  ", n_ptr->bclink.last_in);
1507                 info("Oos state: %u,  ", n_ptr->bclink.oos_state);
1508                 info("Last sent: %u\n", n_ptr->bclink.last_sent);
1509
1510                 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1511
1512                 tipc_node_unlock(n_ptr);
1513
1514                 l_ptr->stale_count = 0;
1515         }
1516 }
1517
1518 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1519                           u32 retransmits)
1520 {
1521         struct tipc_msg *msg;
1522
1523         if (!buf)
1524                 return;
1525
1526         msg = buf_msg(buf);
1527
1528         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1529                 if (l_ptr->retransm_queue_size == 0) {
1530                         l_ptr->retransm_queue_head = msg_seqno(msg);
1531                         l_ptr->retransm_queue_size = retransmits;
1532                 } else {
1533                         err("Unexpected retransmit on link %s (qsize=%u)\n",
1534                             l_ptr->name, l_ptr->retransm_queue_size);
1535                 }
1536                 return;
1537         } else {
1538                 /* Detect repeated retransmit failures on uncongested bearer */
1539
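                /*
                 * A packet retransmitted more than 100 times triggers
                 * link_retransmit_failure() above, which resets the link.
                 */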
1540                 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1541                         if (++l_ptr->stale_count > 100) {
1542                                 link_retransmit_failure(l_ptr, buf);
1543                                 return;
1544                         }
1545                 } else {
1546                         l_ptr->last_retransmitted = msg_seqno(msg);
1547                         l_ptr->stale_count = 1;
1548                 }
1549         }
1550
1551         while (retransmits && (buf != l_ptr->next_out) && buf) {
1552                 msg = buf_msg(buf);
1553                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1554                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1555                 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1556                         buf = buf->next;
1557                         retransmits--;
1558                         l_ptr->stats.retransmitted++;
1559                 } else {
1560                         tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1561                         l_ptr->stats.bearer_congs++;
1562                         l_ptr->retransm_queue_head = buf_seqno(buf);
1563                         l_ptr->retransm_queue_size = retransmits;
1564                         return;
1565                 }
1566         }
1567
1568         l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1569 }
1570
1571 /**
1572  * link_insert_deferred_queue - insert deferred messages back into receive chain
1573  */
1574
1575 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1576                                                   struct sk_buff *buf)
1577 {
1578         u32 seq_no;
1579
1580         if (l_ptr->oldest_deferred_in == NULL)
1581                 return buf;
1582
1583         seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1584         if (seq_no == mod(l_ptr->next_in_no)) {
1585                 l_ptr->newest_deferred_in->next = buf;
1586                 buf = l_ptr->oldest_deferred_in;
1587                 l_ptr->oldest_deferred_in = NULL;
1588                 l_ptr->deferred_inqueue_sz = 0;
1589         }
1590         return buf;
1591 }
1592
1593 /**
1594  * link_recv_buf_validate - validate basic format of received message
1595  *
1596  * This routine ensures a TIPC message has an acceptable header, and at least
1597  * as much data as the header indicates it should.  The routine also ensures
1598  * that the entire message header is stored in the main fragment of the message
1599  * buffer, to simplify future access to message header fields.
1600  *
1601  * Note: Having extra info present in the message header or data areas is OK.
1602  * TIPC will ignore the excess, under the assumption that it is optional info
1603  * introduced by a later release of the protocol.
1604  */
1605
1606 static int link_recv_buf_validate(struct sk_buff *buf)
1607 {
1608         static u32 min_data_hdr_size[8] = {
1609                 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1610                 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1611                 };
1612
1613         struct tipc_msg *msg;
1614         u32 tipc_hdr[2];
1615         u32 size;
1616         u32 hdr_size;
1617         u32 min_hdr_size;
1618
1619         if (unlikely(buf->len < MIN_H_SIZE))
1620                 return 0;
1621
1622         msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1623         if (msg == NULL)
1624                 return 0;
1625
1626         if (unlikely(msg_version(msg) != TIPC_VERSION))
1627                 return 0;
1628
1629         size = msg_size(msg);
1630         hdr_size = msg_hdr_sz(msg);
1631         min_hdr_size = msg_isdata(msg) ?
1632                 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1633
1634         if (unlikely((hdr_size < min_hdr_size) ||
1635                      (size < hdr_size) ||
1636                      (buf->len < size) ||
1637                      (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1638                 return 0;
1639
1640         return pskb_may_pull(buf, hdr_size);
1641 }
1642
1643 /**
1644  * tipc_recv_msg - process TIPC messages arriving from off-node
1645  * @head: pointer to message buffer chain
1646  * @b_ptr: pointer to bearer that the message arrived on
1647  *
1648  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1649  * structure (i.e. cannot be NULL), but bearer can be inactive.
1650  */
1651
1652 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1653 {
1654         read_lock_bh(&tipc_net_lock);
1655         while (head) {
1656                 struct tipc_node *n_ptr;
1657                 struct tipc_link *l_ptr;
1658                 struct sk_buff *crs;
1659                 struct sk_buff *buf = head;
1660                 struct tipc_msg *msg;
1661                 u32 seq_no;
1662                 u32 ackd;
1663                 u32 released = 0;
1664                 int type;
1665
1666                 head = head->next;
1667
1668                 /* Ensure bearer is still enabled */
1669
1670                 if (unlikely(!b_ptr->active))
1671                         goto cont;
1672
1673                 /* Ensure message is well-formed */
1674
1675                 if (unlikely(!link_recv_buf_validate(buf)))
1676                         goto cont;
1677
1678                 /* Ensure message data is a single contiguous unit */
1679
1680                 if (unlikely(skb_linearize(buf)))
1681                         goto cont;
1682
1683                 /* Handle arrival of a non-unicast link message */
1684
1685                 msg = buf_msg(buf);
1686
1687                 if (unlikely(msg_non_seq(msg))) {
1688                         if (msg_user(msg) ==  LINK_CONFIG)
1689                                 tipc_disc_recv_msg(buf, b_ptr);
1690                         else
1691                                 tipc_bclink_recv_pkt(buf);
1692                         continue;
1693                 }
1694
1695                 /* Discard unicast link messages destined for another node */
1696
1697                 if (unlikely(!msg_short(msg) &&
1698                              (msg_destnode(msg) != tipc_own_addr)))
1699                         goto cont;
1700
1701                 /* Locate neighboring node that sent message */
1702
1703                 n_ptr = tipc_node_find(msg_prevnode(msg));
1704                 if (unlikely(!n_ptr))
1705                         goto cont;
1706                 tipc_node_lock(n_ptr);
1707
1708                 /* Locate unicast link endpoint that should handle message */
1709
1710                 l_ptr = n_ptr->links[b_ptr->identity];
1711                 if (unlikely(!l_ptr)) {
1712                         tipc_node_unlock(n_ptr);
1713                         goto cont;
1714                 }
1715
1716                 /* Verify that communication with node is currently allowed */
1717
1718                 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1719                         msg_user(msg) == LINK_PROTOCOL &&
1720                         (msg_type(msg) == RESET_MSG ||
1721                                         msg_type(msg) == ACTIVATE_MSG) &&
1722                         !msg_redundant_link(msg))
1723                         n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1724
1725                 if (n_ptr->block_setup) {
1726                         tipc_node_unlock(n_ptr);
1727                         goto cont;
1728                 }
1729
1730                 /* Validate message sequence number info */
1731
1732                 seq_no = msg_seqno(msg);
1733                 ackd = msg_ack(msg);
1734
1735                 /* Release acked messages */
1736
1737                 if (n_ptr->bclink.supported)
1738                         tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739
1740                 crs = l_ptr->first_out;
1741                 while ((crs != l_ptr->next_out) &&
1742                        less_eq(buf_seqno(crs), ackd)) {
1743                         struct sk_buff *next = crs->next;
1744
1745                         kfree_skb(crs);
1746                         crs = next;
1747                         released++;
1748                 }
1749                 if (released) {
1750                         l_ptr->first_out = crs;
1751                         l_ptr->out_queue_size -= released;
1752                 }
1753
1754                 /* Try sending any messages link endpoint has pending */
1755
1756                 if (unlikely(l_ptr->next_out))
1757                         tipc_link_push_queue(l_ptr);
1758                 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1759                         tipc_link_wakeup_ports(l_ptr, 0);
1760                 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1761                         l_ptr->stats.sent_acks++;
1762                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1763                 }
1764
1765                 /* Now (finally!) process the incoming message */
1766
1767 protocol_check:
1768                 if (likely(link_working_working(l_ptr))) {
1769                         if (likely(seq_no == mod(l_ptr->next_in_no))) {
1770                                 l_ptr->next_in_no++;
1771                                 if (unlikely(l_ptr->oldest_deferred_in))
1772                                         head = link_insert_deferred_queue(l_ptr,
1773                                                                           head);
1774 deliver:
1775                                 if (likely(msg_isdata(msg))) {
1776                                         tipc_node_unlock(n_ptr);
1777                                         tipc_port_recv_msg(buf);
1778                                         continue;
1779                                 }
1780                                 switch (msg_user(msg)) {
1781                                         int ret;
1782                                 case MSG_BUNDLER:
1783                                         l_ptr->stats.recv_bundles++;
1784                                         l_ptr->stats.recv_bundled +=
1785                                                 msg_msgcnt(msg);
1786                                         tipc_node_unlock(n_ptr);
1787                                         tipc_link_recv_bundle(buf);
1788                                         continue;
1789                                 case NAME_DISTRIBUTOR:
1790                                         tipc_node_unlock(n_ptr);
1791                                         tipc_named_recv(buf);
1792                                         continue;
1793                                 case CONN_MANAGER:
1794                                         tipc_node_unlock(n_ptr);
1795                                         tipc_port_recv_proto_msg(buf);
1796                                         continue;
1797                                 case MSG_FRAGMENTER:
1798                                         l_ptr->stats.recv_fragments++;
1799                                         ret = tipc_link_recv_fragment(
1800                                                 &l_ptr->defragm_buf,
1801                                                 &buf, &msg);
1802                                         if (ret == 1) {
1803                                                 l_ptr->stats.recv_fragmented++;
1804                                                 goto deliver;
1805                                         }
1806                                         if (ret == -1)
1807                                                 l_ptr->next_in_no--;
1808                                         break;
1809                                 case CHANGEOVER_PROTOCOL:
1810                                         type = msg_type(msg);
1811                                         if (link_recv_changeover_msg(&l_ptr,
1812                                                                      &buf)) {
1813                                                 msg = buf_msg(buf);
1814                                                 seq_no = msg_seqno(msg);
1815                                                 if (type == ORIGINAL_MSG)
1816                                                         goto deliver;
1817                                                 goto protocol_check;
1818                                         }
1819                                         break;
1820                                 default:
1821                                         kfree_skb(buf);
1822                                         buf = NULL;
1823                                         break;
1824                                 }
1825                                 tipc_node_unlock(n_ptr);
1826                                 tipc_net_route_msg(buf);
1827                                 continue;
1828                         }
1829                         link_handle_out_of_seq_msg(l_ptr, buf);
1830                         head = link_insert_deferred_queue(l_ptr, head);
1831                         tipc_node_unlock(n_ptr);
1832                         continue;
1833                 }
1834
1835                 if (msg_user(msg) == LINK_PROTOCOL) {
1836                         link_recv_proto_msg(l_ptr, buf);
1837                         head = link_insert_deferred_queue(l_ptr, head);
1838                         tipc_node_unlock(n_ptr);
1839                         continue;
1840                 }
1841                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1842
1843                 if (link_working_working(l_ptr)) {
1844                         /* Re-insert in front of queue */
1845                         buf->next = head;
1846                         head = buf;
1847                         tipc_node_unlock(n_ptr);
1848                         continue;
1849                 }
1850                 tipc_node_unlock(n_ptr);
1851 cont:
1852                 kfree_skb(buf);
1853         }
1854         read_unlock_bh(&tipc_net_lock);
1855 }
1856
1857 /*
1858  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1859  *
1860  * Returns increase in queue length (i.e. 0 or 1)
1861  */
1862
1863 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1864                         struct sk_buff *buf)
1865 {
1866         struct sk_buff *queue_buf;
1867         struct sk_buff **prev;
1868         u32 seq_no = buf_seqno(buf);
1869
1870         buf->next = NULL;
1871
1872         /* Empty queue? */
1873         if (*head == NULL) {
1874                 *head = *tail = buf;
1875                 return 1;
1876         }
1877
1878         /* Belongs at tail of queue? */
1879         if (less(buf_seqno(*tail), seq_no)) {
1880                 (*tail)->next = buf;
1881                 *tail = buf;
1882                 return 1;
1883         }
1884
1885         /* Locate insertion point in queue, then insert; discard if duplicate */
1886         prev = head;
1887         queue_buf = *head;
1888         for (;;) {
1889                 u32 curr_seqno = buf_seqno(queue_buf);
1890
1891                 if (seq_no == curr_seqno) {
1892                         kfree_skb(buf);
1893                         return 0;
1894                 }
1895
1896                 if (less(seq_no, curr_seqno))
1897                         break;
1898
1899                 prev = &queue_buf->next;
1900                 queue_buf = queue_buf->next;
1901         }
1902
1903         buf->next = queue_buf;
1904         *prev = buf;
1905         return 1;
1906 }
1907
1908 /*
1909  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1910  */
1911
1912 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1913                                        struct sk_buff *buf)
1914 {
1915         u32 seq_no = buf_seqno(buf);
1916
1917         if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1918                 link_recv_proto_msg(l_ptr, buf);
1919                 return;
1920         }
1921
1922         /* Record OOS packet arrival (force mismatch on next timeout) */
1923
1924         l_ptr->checkpoint--;
1925
1926         /*
1927          * Discard packet if a duplicate; otherwise add it to deferred queue
1928          * and notify peer of gap as per protocol specification
1929          */
1930
1931         if (less(seq_no, mod(l_ptr->next_in_no))) {
1932                 l_ptr->stats.duplicates++;
1933                 kfree_skb(buf);
1934                 return;
1935         }
1936
1937         if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1938                                 &l_ptr->newest_deferred_in, buf)) {
1939                 l_ptr->deferred_inqueue_sz++;
1940                 l_ptr->stats.deferred_recv++;
1941                 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1942                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1943         } else
1944                 l_ptr->stats.duplicates++;
1945 }
1946
1947 /*
1948  * Send protocol message to the other endpoint.
1949  */
1950 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1951                                 int probe_msg, u32 gap, u32 tolerance,
1952                                 u32 priority, u32 ack_mtu)
1953 {
1954         struct sk_buff *buf = NULL;
1955         struct tipc_msg *msg = l_ptr->pmsg;
1956         u32 msg_size = sizeof(l_ptr->proto_msg);
1957         int r_flag;
1958
1959         /* Discard any previous message that was deferred due to congestion */
1960
1961         if (l_ptr->proto_msg_queue) {
1962                 kfree_skb(l_ptr->proto_msg_queue);
1963                 l_ptr->proto_msg_queue = NULL;
1964         }
1965
1966         if (link_blocked(l_ptr))
1967                 return;
1968
1969         /* Abort non-RESET send if communication with node is prohibited */
1970
1971         if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1972                 return;
1973
1974         /* Create protocol message with "out-of-sequence" sequence number */
1975
1976         msg_set_type(msg, msg_typ);
1977         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1978         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1979         msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1980
1981         if (msg_typ == STATE_MSG) {
1982                 u32 next_sent = mod(l_ptr->next_out_no);
1983
1984                 if (!tipc_link_is_up(l_ptr))
1985                         return;
1986                 if (l_ptr->next_out)
1987                         next_sent = buf_seqno(l_ptr->next_out);
1988                 msg_set_next_sent(msg, next_sent);
1989                 if (l_ptr->oldest_deferred_in) {
1990                         u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1991                         gap = mod(rec - mod(l_ptr->next_in_no));
1992                 }
1993                 msg_set_seq_gap(msg, gap);
1994                 if (gap)
1995                         l_ptr->stats.sent_nacks++;
1996                 msg_set_link_tolerance(msg, tolerance);
1997                 msg_set_linkprio(msg, priority);
1998                 msg_set_max_pkt(msg, ack_mtu);
1999                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
2000                 msg_set_probe(msg, probe_msg != 0);
2001                 if (probe_msg) {
2002                         u32 mtu = l_ptr->max_pkt;
2003
2004                         if ((mtu < l_ptr->max_pkt_target) &&
2005                             link_working_working(l_ptr) &&
2006                             l_ptr->fsm_msg_cnt) {
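                                /*
                                 * Size the probe roughly halfway between the
                                 * current MTU and the discovery target,
                                 * rounded down to a 4-byte boundary; after 10
                                 * probes without the link MTU increasing, the
                                 * target itself is lowered to the last probed
                                 * size.
                                 */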
2007                                 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2008                                 if (l_ptr->max_pkt_probes == 10) {
2009                                         l_ptr->max_pkt_target = (msg_size - 4);
2010                                         l_ptr->max_pkt_probes = 0;
2011                                         msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2012                                 }
2013                                 l_ptr->max_pkt_probes++;
2014                         }
2015
2016                         l_ptr->stats.sent_probes++;
2017                 }
2018                 l_ptr->stats.sent_states++;
2019         } else {                /* RESET_MSG or ACTIVATE_MSG */
2020                 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2021                 msg_set_seq_gap(msg, 0);
2022                 msg_set_next_sent(msg, 1);
2023                 msg_set_probe(msg, 0);
2024                 msg_set_link_tolerance(msg, l_ptr->tolerance);
2025                 msg_set_linkprio(msg, l_ptr->priority);
2026                 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2027         }
2028
2029         r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
2030         msg_set_redundant_link(msg, r_flag);
2031         msg_set_linkprio(msg, l_ptr->priority);
2032         msg_set_size(msg, msg_size);
2033
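        /*
         * Protocol messages carry a sequence number half the sequence space
         * ahead of the data stream, so they can never be mistaken for
         * in-sequence data packets by the receiver.
         */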
2034         msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2035
2036         buf = tipc_buf_acquire(msg_size);
2037         if (!buf)
2038                 return;
2039
2040         skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2041
2042         /* Defer message if bearer is already congested */
2043
2044         if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2045                 l_ptr->proto_msg_queue = buf;
2046                 return;
2047         }
2048
2049         /* Defer message if attempting to send results in bearer congestion */
2050
2051         if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2052                 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2053                 l_ptr->proto_msg_queue = buf;
2054                 l_ptr->stats.bearer_congs++;
2055                 return;
2056         }
2057
2058         /* Discard message if it was sent successfully */
2059
2060         l_ptr->unacked_window = 0;
2061         kfree_skb(buf);
2062 }
2063
2064 /*
2065  * Receive protocol message.
2066  * Note that the network plane id propagates through the network, and may
2067  * change at any time. The node with the lowest address rules.
2068  */
2069
2070 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2071 {
2072         u32 rec_gap = 0;
2073         u32 max_pkt_info;
2074         u32 max_pkt_ack;
2075         u32 msg_tol;
2076         struct tipc_msg *msg = buf_msg(buf);
2077
2078         if (link_blocked(l_ptr))
2079                 goto exit;
2080
2081         /* record unnumbered packet arrival (force mismatch on next timeout) */
2082
2083         l_ptr->checkpoint--;
2084
2085         if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2086                 if (tipc_own_addr > msg_prevnode(msg))
2087                         l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2088
2089         l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2090
2091         switch (msg_type(msg)) {
2092
2093         case RESET_MSG:
2094                 if (!link_working_unknown(l_ptr) &&
2095                     (l_ptr->peer_session != INVALID_SESSION)) {
2096                         if (less_eq(msg_session(msg), l_ptr->peer_session))
2097                                 break; /* duplicate or old reset: ignore */
2098                 }
2099
2100                 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
2101                                 link_working_unknown(l_ptr))) {
2102                         /*
2103                          * peer has lost contact -- don't allow peer's links
2104                          * to reactivate before we recognize loss & clean up
2105                          */
2106                         l_ptr->owner->block_setup = WAIT_NODE_DOWN;
2107                 }
2108
2109                 link_state_event(l_ptr, RESET_MSG);
2110
2111                 /* fall through */
2112         case ACTIVATE_MSG:
2113                 /* Update link settings according to the other endpoint's values */
2114
2115                 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2116
2117                 msg_tol = msg_link_tolerance(msg);
2118                 if (msg_tol > l_ptr->tolerance)
2119                         link_set_supervision_props(l_ptr, msg_tol);
2120
2121                 if (msg_linkprio(msg) > l_ptr->priority)
2122                         l_ptr->priority = msg_linkprio(msg);
2123
2124                 max_pkt_info = msg_max_pkt(msg);
2125                 if (max_pkt_info) {
2126                         if (max_pkt_info < l_ptr->max_pkt_target)
2127                                 l_ptr->max_pkt_target = max_pkt_info;
2128                         if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2129                                 l_ptr->max_pkt = l_ptr->max_pkt_target;
2130                 } else {
2131                         l_ptr->max_pkt = l_ptr->max_pkt_target;
2132                 }
2133                 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2134
2135                 /* Synchronize broadcast link info, if not done previously */
2136
2137                 if (!tipc_node_is_up(l_ptr->owner)) {
2138                         l_ptr->owner->bclink.last_sent =
2139                                 l_ptr->owner->bclink.last_in =
2140                                 msg_last_bcast(msg);
2141                         l_ptr->owner->bclink.oos_state = 0;
2142                 }
2143
2144                 l_ptr->peer_session = msg_session(msg);
2145                 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2146
2147                 if (msg_type(msg) == ACTIVATE_MSG)
2148                         link_state_event(l_ptr, ACTIVATE_MSG);
2149                 break;
2150         case STATE_MSG:
2151
2152                 msg_tol = msg_link_tolerance(msg);
2153                 if (msg_tol)
2154                         link_set_supervision_props(l_ptr, msg_tol);
2155
2156                 if (msg_linkprio(msg) &&
2157                     (msg_linkprio(msg) != l_ptr->priority)) {
2158                         warn("Resetting link <%s>, priority change %u->%u\n",
2159                              l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2160                         l_ptr->priority = msg_linkprio(msg);
2161                         tipc_link_reset(l_ptr); /* Enforce change to take effect */
2162                         break;
2163                 }
2164                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2165                 l_ptr->stats.recv_states++;
2166                 if (link_reset_unknown(l_ptr))
2167                         break;
2168
2169                 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2170                         rec_gap = mod(msg_next_sent(msg) -
2171                                       mod(l_ptr->next_in_no));
2172                 }
2173
2174                 max_pkt_ack = msg_max_pkt(msg);
2175                 if (max_pkt_ack > l_ptr->max_pkt) {
2176                         l_ptr->max_pkt = max_pkt_ack;
2177                         l_ptr->max_pkt_probes = 0;
2178                 }
2179
2180                 max_pkt_ack = 0;
2181                 if (msg_probe(msg)) {
2182                         l_ptr->stats.recv_probes++;
2183                         if (msg_size(msg) > sizeof(l_ptr->proto_msg))
2184                                 max_pkt_ack = msg_size(msg);
2185                 }
2186
2187                 /* Send protocol message before any retransmits, to reduce loss risk */
2188
2189                 if (l_ptr->owner->bclink.supported)
2190                         tipc_bclink_update_link_state(l_ptr->owner,
2191                                                       msg_last_bcast(msg));
2192
2193                 if (rec_gap || (msg_probe(msg))) {
2194                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2195                                                  0, rec_gap, 0, 0, max_pkt_ack);
2196                 }
2197                 if (msg_seq_gap(msg)) {
2198                         l_ptr->stats.recv_nacks++;
2199                         tipc_link_retransmit(l_ptr, l_ptr->first_out,
2200                                              msg_seq_gap(msg));
2201                 }
2202                 break;
2203         }
2204 exit:
2205         kfree_skb(buf);
2206 }
2207
2208
2209 /*
2210  * tipc_link_tunnel(): Send one message via a link belonging to
2211  * another bearer. Owner node is locked.
2212  */
2213 static void tipc_link_tunnel(struct tipc_link *l_ptr,
2214                              struct tipc_msg *tunnel_hdr,
2215                              struct tipc_msg  *msg,
2216                              u32 selector)
2217 {
2218         struct tipc_link *tunnel;
2219         struct sk_buff *buf;
2220         u32 length = msg_size(msg);
2221
2222         tunnel = l_ptr->owner->active_links[selector & 1];
2223         if (!tipc_link_is_up(tunnel)) {
2224                 warn("Link changeover error, "
2225                      "tunnel link no longer available\n");
2226                 return;
2227         }
2228         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2229         buf = tipc_buf_acquire(length + INT_H_SIZE);
2230         if (!buf) {
2231                 warn("Link changeover error, "
2232                      "unable to send tunnel msg\n");
2233                 return;
2234         }
2235         skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2236         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2237         tipc_link_send_buf(tunnel, buf);
2238 }
2239
2240
2241
2242 /*
2243  * changeover(): Send whole message queue via the remaining link
2244  *               Owner node is locked.
2245  */
2246
2247 void tipc_link_changeover(struct tipc_link *l_ptr)
2248 {
2249         u32 msgcount = l_ptr->out_queue_size;
2250         struct sk_buff *crs = l_ptr->first_out;
2251         struct tipc_link *tunnel = l_ptr->owner->active_links[0];
2252         struct tipc_msg tunnel_hdr;
2253         int split_bundles;
2254
2255         if (!tunnel)
2256                 return;
2257
2258         if (!l_ptr->owner->permit_changeover) {
2259                 warn("Link changeover error, "
2260                      "peer did not permit changeover\n");
2261                 return;
2262         }
2263
2264         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2265                  ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2266         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2267         msg_set_msgcnt(&tunnel_hdr, msgcount);
2268
2269         if (!l_ptr->first_out) {
2270                 struct sk_buff *buf;
2271
2272                 buf = tipc_buf_acquire(INT_H_SIZE);
2273                 if (buf) {
2274                         skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2275                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
2276                         tipc_link_send_buf(tunnel, buf);
2277                 } else {
2278                         warn("Link changeover error, "
2279                              "unable to send changeover msg\n");
2280                 }
2281                 return;
2282         }
2283
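        /*
         * If two different links are still active, bundles must be split so
         * that each wrapped message is tunnelled via the link matching its
         * own link selector.
         */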
2284         split_bundles = (l_ptr->owner->active_links[0] !=
2285                          l_ptr->owner->active_links[1]);
2286
2287         while (crs) {
2288                 struct tipc_msg *msg = buf_msg(crs);
2289
2290                 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2291                         struct tipc_msg *m = msg_get_wrapped(msg);
2292                         unchar *pos = (unchar *)m;
2293
2294                         msgcount = msg_msgcnt(msg);
2295                         while (msgcount--) {
2296                                 msg_set_seqno(m, msg_seqno(msg));
2297                                 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2298                                                  msg_link_selector(m));
2299                                 pos += align(msg_size(m));
2300                                 m = (struct tipc_msg *)pos;
2301                         }
2302                 } else {
2303                         tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2304                                          msg_link_selector(msg));
2305                 }
2306                 crs = crs->next;
2307         }
2308 }
2309
2310 void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2311 {
2312         struct sk_buff *iter;
2313         struct tipc_msg tunnel_hdr;
2314
2315         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2316                  DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2317         msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2318         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2319         iter = l_ptr->first_out;
2320         while (iter) {
2321                 struct sk_buff *outbuf;
2322                 struct tipc_msg *msg = buf_msg(iter);
2323                 u32 length = msg_size(msg);
2324
2325                 if (msg_user(msg) == MSG_BUNDLER)
2326                         msg_set_type(msg, CLOSED_MSG);
2327                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
2328                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2329                 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2330                 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2331                 if (outbuf == NULL) {
2332                         warn("Link changeover error, "
2333                              "unable to send duplicate msg\n");
2334                         return;
2335                 }
2336                 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2337                 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2338                                                length);
2339                 tipc_link_send_buf(tunnel, outbuf);
2340                 if (!tipc_link_is_up(l_ptr))
2341                         return;
2342                 iter = iter->next;
2343         }
2344 }
2345
2346
2347
2348 /**
2349  * buf_extract - extracts embedded TIPC message from another message
2350  * @skb: encapsulating message buffer
2351  * @from_pos: offset to extract from
2352  *
2353  * Returns a new message buffer containing an embedded message.  The
2354  * encapsulating message itself is left unchanged.
2355  */
2356
2357 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2358 {
2359         struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2360         u32 size = msg_size(msg);
2361         struct sk_buff *eb;
2362
2363         eb = tipc_buf_acquire(size);
2364         if (eb)
2365                 skb_copy_to_linear_data(eb, msg, size);
2366         return eb;
2367 }
2368
2369 /*
2370  *  link_recv_changeover_msg(): Receive tunneled packet sent
2371  *  via other link. Node is locked. Return extracted buffer.
2372  */
2373
2374 static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2375                                     struct sk_buff **buf)
2376 {
2377         struct sk_buff *tunnel_buf = *buf;
2378         struct tipc_link *dest_link;
2379         struct tipc_msg *msg;
2380         struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2381         u32 msg_typ = msg_type(tunnel_msg);
2382         u32 msg_count = msg_msgcnt(tunnel_msg);
2383
2384         dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2385         if (!dest_link)
2386                 goto exit;
2387         if (dest_link == *l_ptr) {
2388                 err("Unexpected changeover message on link <%s>\n",
2389                     (*l_ptr)->name);
2390                 goto exit;
2391         }
2392         *l_ptr = dest_link;
2393         msg = msg_get_wrapped(tunnel_msg);
2394
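        /*
         * DUPLICATE_MSG: accept the wrapped packet only if it has not
         * already been received in sequence on the destination link.
         */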
2395         if (msg_typ == DUPLICATE_MSG) {
2396                 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2397                         goto exit;
2398                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2399                 if (*buf == NULL) {
2400                         warn("Link changeover error, duplicate msg dropped\n");
2401                         goto exit;
2402                 }
2403                 kfree_skb(tunnel_buf);
2404                 return 1;
2405         }
2406
2407         /* First original message? */
2408
2409         if (tipc_link_is_up(dest_link)) {
2410                 info("Resetting link <%s>, changeover initiated by peer\n",
2411                      dest_link->name);
2412                 tipc_link_reset(dest_link);
2413                 dest_link->exp_msg_count = msg_count;
2414                 if (!msg_count)
2415                         goto exit;
2416         } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2417                 dest_link->exp_msg_count = msg_count;
2418                 if (!msg_count)
2419                         goto exit;
2420         }
2421
2422         /* Receive original message */
2423
2424         if (dest_link->exp_msg_count == 0) {
2425                 warn("Link switchover error, "
2426                      "got too many tunnelled messages\n");
2427                 goto exit;
2428         }
2429         dest_link->exp_msg_count--;
2430         if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2431                 goto exit;
2432         } else {
2433                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2434                 if (*buf != NULL) {
2435                         kfree_skb(tunnel_buf);
2436                         return 1;
2437                 } else {
2438                         warn("Link changeover error, original msg dropped\n");
2439                 }
2440         }
2441 exit:
2442         *buf = NULL;
2443         kfree_skb(tunnel_buf);
2444         return 0;
2445 }
2446
2447 /*
2448  *  Bundler functionality:
2449  */
2450 void tipc_link_recv_bundle(struct sk_buff *buf)
2451 {
2452         u32 msgcount = msg_msgcnt(buf_msg(buf));
2453         u32 pos = INT_H_SIZE;
2454         struct sk_buff *obuf;
2455
2456         while (msgcount--) {
2457                 obuf = buf_extract(buf, pos);
2458                 if (obuf == NULL) {
2459                         warn("Link unable to unbundle message(s)\n");
2460                         break;
2461                 }
2462                 pos += align(msg_size(buf_msg(obuf)));
2463                 tipc_net_route_msg(obuf);
2464         }
2465         kfree_skb(buf);
2466 }
2467
2468 /*
2469  *  Fragmentation/defragmentation:
2470  */
2471
2472
2473 /*
2474  * link_send_long_buf: Entry for buffers needing fragmentation.
2475  * The buffer is complete, including the total message length.
2476  * Returns user data length.
2477  */
2478 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2479 {
2480         struct sk_buff *buf_chain = NULL;
2481         struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2482         struct tipc_msg *inmsg = buf_msg(buf);
2483         struct tipc_msg fragm_hdr;
2484         u32 insize = msg_size(inmsg);
2485         u32 dsz = msg_data_sz(inmsg);
2486         unchar *crs = buf->data;
2487         u32 rest = insize;
2488         u32 pack_sz = l_ptr->max_pkt;
2489         u32 fragm_sz = pack_sz - INT_H_SIZE;
2490         u32 fragm_no = 0;
2491         u32 destaddr;
2492
2493         if (msg_short(inmsg))
2494                 destaddr = l_ptr->addr;
2495         else
2496                 destaddr = msg_destnode(inmsg);
2497
2498         /* Prepare reusable fragment header: */
2499
2500         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2501                  INT_H_SIZE, destaddr);
2502
2503         /* Chop up message: */
2504
2505         while (rest > 0) {
2506                 struct sk_buff *fragm;
2507
2508                 if (rest <= fragm_sz) {
2509                         fragm_sz = rest;
2510                         msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2511                 }
2512                 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2513                 if (fragm == NULL) {
2514                         kfree_skb(buf);
2515                         while (buf_chain) {
2516                                 buf = buf_chain;
2517                                 buf_chain = buf_chain->next;
2518                                 kfree_skb(buf);
2519                         }
2520                         return -ENOMEM;
2521                 }
2522                 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2523                 fragm_no++;
2524                 msg_set_fragm_no(&fragm_hdr, fragm_no);
2525                 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2526                 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2527                                                fragm_sz);
2528                 buf_chain_tail->next = fragm;
2529                 buf_chain_tail = fragm;
2530
2531                 rest -= fragm_sz;
2532                 crs += fragm_sz;
2533                 msg_set_type(&fragm_hdr, FRAGMENT);
2534         }
2535         kfree_skb(buf);
2536
2537         /* Append chain of fragments to send queue & send them */
2538
2539         l_ptr->long_msg_seq_no++;
2540         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2541         l_ptr->stats.sent_fragments += fragm_no;
2542         l_ptr->stats.sent_fragmented++;
2543         tipc_link_push_queue(l_ptr);
2544
2545         return dsz;
2546 }
2547
2548 /*
2549  * A pending message being re-assembled must store certain values
2550  * to handle subsequent fragments correctly. The following functions
2551  * help store these values in unused, available fields in the
2552  * pending message. This makes dynamic memory allocation unnecessary.
2553  */
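/*
 * Header fields reused while a message sits on the defragmentation queue:
 *   seqno     - long message sequence number
 *   ack       - fragment size used by the sender
 *   bcast_ack - number of fragments still expected
 *   reroute   - staleness/timer count (see link_check_defragm_bufs())
 */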
2554
2555 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2556 {
2557         msg_set_seqno(buf_msg(buf), seqno);
2558 }
2559
2560 static u32 get_fragm_size(struct sk_buff *buf)
2561 {
2562         return msg_ack(buf_msg(buf));
2563 }
2564
2565 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2566 {
2567         msg_set_ack(buf_msg(buf), sz);
2568 }
2569
2570 static u32 get_expected_frags(struct sk_buff *buf)
2571 {
2572         return msg_bcast_ack(buf_msg(buf));
2573 }
2574
2575 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2576 {
2577         msg_set_bcast_ack(buf_msg(buf), exp);
2578 }
2579
2580 static u32 get_timer_cnt(struct sk_buff *buf)
2581 {
2582         return msg_reroute_cnt(buf_msg(buf));
2583 }
2584
2585 static void incr_timer_cnt(struct sk_buff *buf)
2586 {
2587         msg_incr_reroute_cnt(buf_msg(buf));
2588 }
2589
2590 /*
2591  * tipc_link_recv_fragment(): Called with the node lock held. Returns
2592  * the reassembled buffer if message is complete.
2593  */
2594 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2595                             struct tipc_msg **m)
2596 {
2597         struct sk_buff *prev = NULL;
2598         struct sk_buff *fbuf = *fb;
2599         struct tipc_msg *fragm = buf_msg(fbuf);
2600         struct sk_buff *pbuf = *pending;
2601         u32 long_msg_seq_no = msg_long_msgno(fragm);
2602
2603         *fb = NULL;
2604
2605         /* Is there an incomplete message waiting for this fragment? */
2606
2607         while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2608                         (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2609                 prev = pbuf;
2610                 pbuf = pbuf->next;
2611         }
2612
2613         if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2614                 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2615                 u32 msg_sz = msg_size(imsg);
2616                 u32 fragm_sz = msg_data_sz(fragm);
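                /* Expected fragment count: ceil(msg_sz / fragm_sz) */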
2617                 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2618                 u32 max =  TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2619                 if (msg_type(imsg) == TIPC_MCAST_MSG)
2620                         max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2621                 if (msg_size(imsg) > max) {
2622                         kfree_skb(fbuf);
2623                         return 0;
2624                 }
2625                 pbuf = tipc_buf_acquire(msg_size(imsg));
2626                 if (pbuf != NULL) {
2627                         pbuf->next = *pending;
2628                         *pending = pbuf;
2629                         skb_copy_to_linear_data(pbuf, imsg,
2630                                                 msg_data_sz(fragm));
2631                         /*  Prepare buffer for subsequent fragments. */
2632
2633                         set_long_msg_seqno(pbuf, long_msg_seq_no);
2634                         set_fragm_size(pbuf, fragm_sz);
2635                         set_expected_frags(pbuf, exp_fragm_cnt - 1);
2636                 } else {
2637                         dbg("Link unable to reassemble fragmented message\n");
2638                         kfree_skb(fbuf);
2639                         return -1;
2640                 }
2641                 kfree_skb(fbuf);
2642                 return 0;
2643         } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2644                 u32 dsz = msg_data_sz(fragm);
2645                 u32 fsz = get_fragm_size(pbuf);
2646                 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2647                 u32 exp_frags = get_expected_frags(pbuf) - 1;
2648                 skb_copy_to_linear_data_offset(pbuf, crs,
2649                                                msg_data(fragm), dsz);
2650                 kfree_skb(fbuf);
2651
2652                 /* Is message complete? */
2653
2654                 if (exp_frags == 0) {
2655                         if (prev)
2656                                 prev->next = pbuf->next;
2657                         else
2658                                 *pending = pbuf->next;
2659                         msg_reset_reroute_cnt(buf_msg(pbuf));
2660                         *fb = pbuf;
2661                         *m = buf_msg(pbuf);
2662                         return 1;
2663                 }
2664                 set_expected_frags(pbuf, exp_frags);
2665                 return 0;
2666         }
2667         kfree_skb(fbuf);
2668         return 0;
2669 }
2670
2671 /**
2672  * link_check_defragm_bufs - flush stale incoming message fragments
2673  * @l_ptr: pointer to link
2674  */
2675
2676 static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2677 {
2678         struct sk_buff *prev = NULL;
2679         struct sk_buff *next = NULL;
2680         struct sk_buff *buf = l_ptr->defragm_buf;
2681
2682         if (!buf)
2683                 return;
2684         if (!link_working_working(l_ptr))
2685                 return;
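        /*
         * Drop any partially reassembled message whose staleness counter
         * has reached four, i.e. it has failed to complete across several
         * consecutive checks.
         */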
2686         while (buf) {
2687                 u32 cnt = get_timer_cnt(buf);
2688
2689                 next = buf->next;
2690                 if (cnt < 4) {
2691                         incr_timer_cnt(buf);
2692                         prev = buf;
2693                 } else {
2694                         if (prev)
2695                                 prev->next = buf->next;
2696                         else
2697                                 l_ptr->defragm_buf = buf->next;
2698                         kfree_skb(buf);
2699                 }
2700                 buf = next;
2701         }
2702 }
2703
2704
2705
2706 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2707 {
2708         if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2709                 return;
2710
2711         l_ptr->tolerance = tolerance;
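        /* Run continuity checks at a quarter of the tolerance, capped at 500 ms */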
2712         l_ptr->continuity_interval =
2713                 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2714         l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2715 }
2716
2717
2718 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2719 {
2720         /* Data messages from this node, including FIRST_FRAGM */
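        /* Each higher importance level gets an extra window/3 of queue space */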
2721         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2722         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2723         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2724         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2725         /* Transiting data messages, including FIRST_FRAGM */
2726         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2727         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2728         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2729         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2730         l_ptr->queue_limit[CONN_MANAGER] = 1200;
2731         l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2732         l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2733         /* FRAGMENT and LAST_FRAGMENT packets */
2734         l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2735 }
2736
2737 /**
2738  * link_find_link - locate link by name
2739  * @name: ptr to link name string
2740  * @node: ptr to area to be filled with ptr to associated node
2741  *
2742  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2743  * this also prevents link deletion.
2744  *
2745  * Returns pointer to link (or NULL if invalid link name).
2746  */
2747
2748 static struct tipc_link *link_find_link(const char *name,
2749                                         struct tipc_node **node)
2750 {
2751         struct tipc_link_name link_name_parts;
2752         struct tipc_bearer *b_ptr;
2753         struct tipc_link *l_ptr;
2754
2755         if (!link_name_validate(name, &link_name_parts))
2756                 return NULL;
2757
2758         b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2759         if (!b_ptr)
2760                 return NULL;
2761
2762         *node = tipc_node_find(link_name_parts.addr_peer);
2763         if (!*node)
2764                 return NULL;
2765
2766         l_ptr = (*node)->links[b_ptr->identity];
2767         if (!l_ptr || strcmp(l_ptr->name, name))
2768                 return NULL;
2769
2770         return l_ptr;
2771 }
2772
2773 /**
2774  * link_value_is_valid -- validate proposed link tolerance/priority/window
2775  *
2776  * @cmd - value type (TIPC_CMD_SET_LINK_*)
2777  * @new_value - the new value
2778  *
2779  * Returns 1 if value is within range, 0 if not.
2780  */
2781
2782 static int link_value_is_valid(u16 cmd, u32 new_value)
2783 {
2784         switch (cmd) {
2785         case TIPC_CMD_SET_LINK_TOL:
2786                 return (new_value >= TIPC_MIN_LINK_TOL) &&
2787                         (new_value <= TIPC_MAX_LINK_TOL);
2788         case TIPC_CMD_SET_LINK_PRI:
2789                 return (new_value <= TIPC_MAX_LINK_PRI);
2790         case TIPC_CMD_SET_LINK_WINDOW:
2791                 return (new_value >= TIPC_MIN_LINK_WIN) &&
2792                         (new_value <= TIPC_MAX_LINK_WIN);
2793         }
2794         return 0;
2795 }
2796
2797
2798 /**
2799  * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2800  * @name: ptr to link, bearer, or media name
2801  * @new_value: new value of link, bearer, or media setting
2802  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2803  *
2804  * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2805  *
2806  * Returns 0 if the value was updated, or a negative value on error.
2807  */
2808
2809 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2810 {
2811         struct tipc_node *node;
2812         struct tipc_link *l_ptr;
2813         struct tipc_bearer *b_ptr;
2814         struct tipc_media *m_ptr;
2815
2816         l_ptr = link_find_link(name, &node);
2817         if (l_ptr) {
2818                 /*
2819                  * acquire node lock for tipc_link_send_proto_msg().
2820                  * see "TIPC locking policy" in net.c.
2821                  */
2822                 tipc_node_lock(node);
2823                 switch (cmd) {
2824                 case TIPC_CMD_SET_LINK_TOL:
2825                         link_set_supervision_props(l_ptr, new_value);
2826                         tipc_link_send_proto_msg(l_ptr,
2827                                 STATE_MSG, 0, 0, new_value, 0, 0);
2828                         break;
2829                 case TIPC_CMD_SET_LINK_PRI:
2830                         l_ptr->priority = new_value;
2831                         tipc_link_send_proto_msg(l_ptr,
2832                                 STATE_MSG, 0, 0, 0, new_value, 0);
2833                         break;
2834                 case TIPC_CMD_SET_LINK_WINDOW:
2835                         tipc_link_set_queue_limits(l_ptr, new_value);
2836                         break;
2837                 }
2838                 tipc_node_unlock(node);
2839                 return 0;
2840         }
2841
2842         b_ptr = tipc_bearer_find(name);
2843         if (b_ptr) {
2844                 switch (cmd) {
2845                 case TIPC_CMD_SET_LINK_TOL:
2846                         b_ptr->tolerance = new_value;
2847                         return 0;
2848                 case TIPC_CMD_SET_LINK_PRI:
2849                         b_ptr->priority = new_value;
2850                         return 0;
2851                 case TIPC_CMD_SET_LINK_WINDOW:
2852                         b_ptr->window = new_value;
2853                         return 0;
2854                 }
2855                 return -EINVAL;
2856         }
2857
2858         m_ptr = tipc_media_find(name);
2859         if (!m_ptr)
2860                 return -ENODEV;
2861         switch (cmd) {
2862         case TIPC_CMD_SET_LINK_TOL:
2863                 m_ptr->tolerance = new_value;
2864                 return 0;
2865         case TIPC_CMD_SET_LINK_PRI:
2866                 m_ptr->priority = new_value;
2867                 return 0;
2868         case TIPC_CMD_SET_LINK_WINDOW:
2869                 m_ptr->window = new_value;
2870                 return 0;
2871         }
2872         return -EINVAL;
2873 }
2874
2875 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2876                                      u16 cmd)
2877 {
2878         struct tipc_link_config *args;
2879         u32 new_value;
2880         int res;
2881
2882         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2883                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2884
2885         args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2886         new_value = ntohl(args->value);
2887
2888         if (!link_value_is_valid(cmd, new_value))
2889                 return tipc_cfg_reply_error_string(
2890                         "cannot change, value invalid");
2891
2892         if (!strcmp(args->name, tipc_bclink_name)) {
2893                 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2894                     (tipc_bclink_set_queue_limits(new_value) == 0))
2895                         return tipc_cfg_reply_none();
2896                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2897                                                    " (cannot change setting on broadcast link)");
2898         }
2899
2900         read_lock_bh(&tipc_net_lock);
2901         res = link_cmd_set_value(args->name, new_value, cmd);
2902         read_unlock_bh(&tipc_net_lock);
2903         if (res)
2904                 return tipc_cfg_reply_error_string("cannot change link setting");
2905
2906         return tipc_cfg_reply_none();
2907 }
2908
2909 /**
2910  * link_reset_statistics - reset link statistics
2911  * @l_ptr: pointer to link
2912  */
2913
2914 static void link_reset_statistics(struct tipc_link *l_ptr)
2915 {
2916         memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
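        /*
         * Seed the info counters with the current sequence numbers so that
         * the packet counts reported by tipc_link_stats(), computed as
         * next_*_no minus *_info, restart from zero after a reset.
         */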
2917         l_ptr->stats.sent_info = l_ptr->next_out_no;
2918         l_ptr->stats.recv_info = l_ptr->next_in_no;
2919 }
2920
2921 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2922 {
2923         char *link_name;
2924         struct tipc_link *l_ptr;
2925         struct tipc_node *node;
2926
2927         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2928                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2929
2930         link_name = (char *)TLV_DATA(req_tlv_area);
2931         if (!strcmp(link_name, tipc_bclink_name)) {
2932                 if (tipc_bclink_reset_stats())
2933                         return tipc_cfg_reply_error_string("link not found");
2934                 return tipc_cfg_reply_none();
2935         }
2936
2937         read_lock_bh(&tipc_net_lock);
2938         l_ptr = link_find_link(link_name, &node);
2939         if (!l_ptr) {
2940                 read_unlock_bh(&tipc_net_lock);
2941                 return tipc_cfg_reply_error_string("link not found");
2942         }
2943
2944         tipc_node_lock(node);
2945         link_reset_statistics(l_ptr);
2946         tipc_node_unlock(node);
2947         read_unlock_bh(&tipc_net_lock);
2948         return tipc_cfg_reply_none();
2949 }
2950
2951 /**
2952  * percent - convert count to a percentage of total (rounded to the nearest whole percent)
2953  */
2954
2955 static u32 percent(u32 count, u32 total)
2956 {
2957         return (count * 100 + (total / 2)) / total;
2958 }
2959
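/*
 * Example (added for illustration): percent(1, 3) = (100 + 1) / 3 = 33 and
 * percent(2, 3) = (200 + 1) / 3 = 67, i.e. results are rounded to the
 * nearest whole percent rather than always truncated.
 */
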
2960 /**
2961  * tipc_link_stats - print link statistics
2962  * @name: link name
2963  * @buf: print buffer area
2964  * @buf_size: size of print buffer area
2965  *
2966  * Returns length of print buffer data string (or 0 if error)
2967  */
2968
2969 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2970 {
2971         struct print_buf pb;
2972         struct tipc_link *l_ptr;
2973         struct tipc_node *node;
2974         char *status;
2975         u32 profile_total = 0;
2976
2977         if (!strcmp(name, tipc_bclink_name))
2978                 return tipc_bclink_stats(buf, buf_size);
2979
2980         tipc_printbuf_init(&pb, buf, buf_size);
2981
2982         read_lock_bh(&tipc_net_lock);
2983         l_ptr = link_find_link(name, &node);
2984         if (!l_ptr) {
2985                 read_unlock_bh(&tipc_net_lock);
2986                 return 0;
2987         }
2988         tipc_node_lock(node);
2989
2990         if (tipc_link_is_active(l_ptr))
2991                 status = "ACTIVE";
2992         else if (tipc_link_is_up(l_ptr))
2993                 status = "STANDBY";
2994         else
2995                 status = "DEFUNCT";
2996         tipc_printf(&pb, "Link <%s>\n"
2997                          "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2998                          "  Window:%u packets\n",
2999                     l_ptr->name, status, l_ptr->max_pkt,
3000                     l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
3001         tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
3002                     l_ptr->next_in_no - l_ptr->stats.recv_info,
3003                     l_ptr->stats.recv_fragments,
3004                     l_ptr->stats.recv_fragmented,
3005                     l_ptr->stats.recv_bundles,
3006                     l_ptr->stats.recv_bundled);
3007         tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
3008                     l_ptr->next_out_no - l_ptr->stats.sent_info,
3009                     l_ptr->stats.sent_fragments,
3010                     l_ptr->stats.sent_fragmented,
3011                     l_ptr->stats.sent_bundles,
3012                     l_ptr->stats.sent_bundled);
3013         profile_total = l_ptr->stats.msg_length_counts;
3014         if (!profile_total)
3015                 profile_total = 1;
3016         tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
3017                          "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
3018                          "-16384:%u%% -32768:%u%% -66000:%u%%\n",
3019                     l_ptr->stats.msg_length_counts,
3020                     l_ptr->stats.msg_lengths_total / profile_total,
3021                     percent(l_ptr->stats.msg_length_profile[0], profile_total),
3022                     percent(l_ptr->stats.msg_length_profile[1], profile_total),
3023                     percent(l_ptr->stats.msg_length_profile[2], profile_total),
3024                     percent(l_ptr->stats.msg_length_profile[3], profile_total),
3025                     percent(l_ptr->stats.msg_length_profile[4], profile_total),
3026                     percent(l_ptr->stats.msg_length_profile[5], profile_total),
3027                     percent(l_ptr->stats.msg_length_profile[6], profile_total));
3028         tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
3029                     l_ptr->stats.recv_states,
3030                     l_ptr->stats.recv_probes,
3031                     l_ptr->stats.recv_nacks,
3032                     l_ptr->stats.deferred_recv,
3033                     l_ptr->stats.duplicates);
3034         tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
3035                     l_ptr->stats.sent_states,
3036                     l_ptr->stats.sent_probes,
3037                     l_ptr->stats.sent_nacks,
3038                     l_ptr->stats.sent_acks,
3039                     l_ptr->stats.retransmitted);
3040         tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
3041                     l_ptr->stats.bearer_congs,
3042                     l_ptr->stats.link_congs,
3043                     l_ptr->stats.max_queue_sz,
3044                     l_ptr->stats.queue_sz_counts
3045                     ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
3046                     : 0);
3047
3048         tipc_node_unlock(node);
3049         read_unlock_bh(&tipc_net_lock);
3050         return tipc_printbuf_validate(&pb);
3051 }
3052
3053 #define MAX_LINK_STATS_INFO 2000
3054
3055 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
3056 {
3057         struct sk_buff *buf;
3058         struct tlv_desc *rep_tlv;
3059         int str_len;
3060
3061         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
3062                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
3063
3064         buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
3065         if (!buf)
3066                 return NULL;
3067
3068         rep_tlv = (struct tlv_desc *)buf->data;
3069
3070         str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3071                                   (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3072         if (!str_len) {
3073                 kfree_skb(buf);
3074                 return tipc_cfg_reply_error_string("link not found");
3075         }
3076
3077         skb_put(buf, TLV_SPACE(str_len));
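        /*
         * Account for only the TLV space actually used by the statistics
         * string, then fill in the TLV header accordingly.
         */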
3078         TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3079
3080         return buf;
3081 }
3082
3083 /**
3084  * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3085  * @dest: network address of destination node
3086  * @selector: used to select from set of active links
3087  *
3088  * If no active link can be found, uses default maximum packet size.
3089  */
3090
3091 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3092 {
3093         struct tipc_node *n_ptr;
3094         struct tipc_link *l_ptr;
3095         u32 res = MAX_PKT_DEFAULT;
3096
3097         if (dest == tipc_own_addr)
3098                 return MAX_MSG_SIZE;
3099
3100         read_lock_bh(&tipc_net_lock);
3101         n_ptr = tipc_node_find(dest);
3102         if (n_ptr) {
3103                 tipc_node_lock(n_ptr);
3104                 l_ptr = n_ptr->active_links[selector & 1];
3105                 if (l_ptr)
3106                         res = l_ptr->max_pkt;
3107                 tipc_node_unlock(n_ptr);
3108         }
3109         read_unlock_bh(&tipc_net_lock);
3110         return res;
3111 }
3112
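/*
 * Illustrative sketch, not part of the original file: a caller could use
 * tipc_link_get_max_pkt() to decide whether a message bound for a given
 * destination needs fragmentation before transmission.  The helper name
 * is hypothetical.
 */
static inline int link_msg_needs_fragmentation(u32 destnode, u32 selector,
                                               u32 msg_size)
{
        return msg_size > tipc_link_get_max_pkt(destnode, selector);
}
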
3113 static void link_print(struct tipc_link *l_ptr, const char *str)
3114 {
3115         char print_area[256];
3116         struct print_buf pb;
3117         struct print_buf *buf = &pb;
3118
3119         tipc_printbuf_init(buf, print_area, sizeof(print_area));
3120
3121         tipc_printf(buf, "%s", str);
3122         tipc_printf(buf, "Link %x<%s>:",
3123                     l_ptr->addr, l_ptr->b_ptr->name);
3124
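        /*
         * With CONFIG_TIPC_DEBUG the send and deferred receive queues are
         * also dumped, unless the link is in one of the reset states.
         */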
3125 #ifdef CONFIG_TIPC_DEBUG
3126         if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3127                 goto print_state;
3128
3129         tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3130         tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3131         tipc_printf(buf, "SQUE");
3132         if (l_ptr->first_out) {
3133                 tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
3134                 if (l_ptr->next_out)
3135                         tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
3136                 tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
3137                 if ((mod(buf_seqno(l_ptr->last_out) -
3138                          buf_seqno(l_ptr->first_out))
3139                      != (l_ptr->out_queue_size - 1)) ||
3140                     (l_ptr->last_out->next != NULL)) {
3141                         tipc_printf(buf, "\nSend queue inconsistency\n");
3142                         tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3143                         tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3144                         tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3145                 }
3146         } else
3147                 tipc_printf(buf, "[]");
3148         tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3149         if (l_ptr->oldest_deferred_in) {
3150                 u32 o = buf_seqno(l_ptr->oldest_deferred_in);
3151                 u32 n = buf_seqno(l_ptr->newest_deferred_in);
3152                 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3153                 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3154                         tipc_printf(buf, ":RQSIZ(%u)",
3155                                     l_ptr->deferred_inqueue_sz);
3156                 }
3157         }
3158 print_state:
3159 #endif
3160
3161         if (link_working_unknown(l_ptr))
3162                 tipc_printf(buf, ":WU");
3163         else if (link_reset_reset(l_ptr))
3164                 tipc_printf(buf, ":RR");
3165         else if (link_reset_unknown(l_ptr))
3166                 tipc_printf(buf, ":RU");
3167         else if (link_working_working(l_ptr))
3168                 tipc_printf(buf, ":WW");
3169         tipc_printf(buf, "\n");
3170
3171         tipc_printbuf_validate(buf);
3172         info("%s", print_area);
3173 }
3174