tipc: eliminate redundant buffer cloning at transmission
[linux-block.git] / net / tipc / bcast.c
/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "link.h"
#include "node.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN		32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

#define BCBEARER	MAX_BEARERS

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: broadcast send link structure
 * @node: (non-standard) node structure representing the broadcast link's peer node
 * @arrvq: arrival queue for received broadcast messages
 * @inputq: queue of messages ready for delivery to listening sockets
 * @namedq: queue for name table update messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct sk_buff_head namedq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

/**
 * tipc_nmap_equal - test for equality of node maps
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}

static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}

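/* tipc_bclink_input - deliver buffered multicast messages from the
 * arrival queue up to the listening sockets
 */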
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}

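/* tipc_bcast_get_mtu - MTU currently used by the broadcast send link */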
int tipc_bcast_get_mtu(struct net *net)
{
	return tipc_link_mtu(tipc_bc_sndlink(net));
}

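/* Accessors for the per-buffer 'ackers' count, i.e. the number of peers
 * that still have to acknowledge a queued broadcast buffer
 */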
static u16 bcbuf_acks(struct sk_buff *skb)
{
	return TIPC_SKB_CB(skb)->ackers;
}

static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
{
	TIPC_SKB_CB(buf)->ackers = ackers;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int i, mtu;

	bb->primary_bearer = INVALID_BEARER_ID;

	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link))
			tipc_link_set_mtu(bb->link, mtu);

		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}
}

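/* tipc_bcast_inc_bearer_dst_cnt - a bearer reaches one more broadcast
 * destination; update its count and re-evaluate the primary bearer
 */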
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]++;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

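/* tipc_bcast_dec_bearer_dst_cnt - a bearer reaches one broadcast destination
 * less; update its count and re-evaluate the primary bearer
 */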
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]--;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

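/* bclink_set_last_sent - record sequence number of last sent broadcast packet */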
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

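/* tipc_bclink_get_last_sent - return sequence number of last sent broadcast packet */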
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

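/* bclink_update_last_sent - advance the node's record of the highest
 * broadcast sequence number it is known to have sent
 */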
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
				 seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 *
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;
	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}
	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}
	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	/* This is a good location for statistical profiling */
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

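/* tipc_bclink_sync_state - update broadcast link state from synchronization
 * info carried in a unicast link protocol message from the peer
 */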
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Note that number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}

/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                   and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq, inputq, rcvq;
	int rc = 0;

	__skb_queue_head_init(&rcvq);
	__skb_queue_head_init(&xmitq);
	skb_queue_head_init(&inputq);

	/* Prepare message clone for local node */
	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
		return -EHOSTUNREACH;

	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, list, &xmitq);
	tipc_bcast_unlock(net);

	/* Don't send to local node if adding to link failed */
	if (unlikely(rc)) {
		__skb_queue_purge(&rcvq);
		return rc;
	}

	/* Broadcast to all nodes, including local node */
	tipc_bcbase_xmit(net, &xmitq);
	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
	__skb_queue_purge(list);
	return 0;
}

/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}

/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			 struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) == STATE_MSG) {
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	} else {
		tipc_link_bc_init_rcv(l, hdr);
	}
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bcast_lock(net);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;
	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}
	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

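/* tipc_bclink_acks_missing - check if node has broadcast packets left to acknowledge */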
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}
	msg_set_mc_netid(msg, tn->net_id);

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

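/* __tipc_nl_add_bc_link_stat - nest broadcast link statistics into a
 * netlink message
 */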
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
		 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

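/* tipc_nl_add_bc_link - fill a netlink message with broadcast link
 * attributes and statistics
 */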
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

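/* tipc_bclink_reset_stats - clear all broadcast link statistics counters */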
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

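/* tipc_bclink_set_queue_limits - set broadcast link window; values below
 * BCLINK_WIN_MIN are raised to the minimum, values above TIPC_MAX_LINK_WIN
 * are rejected
 */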
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

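/* tipc_nl_bc_link_set - apply broadcast link properties received over
 * netlink (currently only the window size)
 */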
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}

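/* tipc_bcast_init - create and initialize the broadcast pseudo-bearer,
 * base structure and send link for this network namespace
 */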
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bcbearer *bcb = NULL;
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
	if (!bcb)
		goto enomem;
	tn->bcbearer = bcb;

	bcb->bearer.window = BCLINK_WIN_DEFAULT;
	bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
	bcb->bearer.identity = MAX_BEARERS;

	bcb->bearer.media = &bcb->media;
	bcb->media.send_msg = tipc_bcbearer_send;
	sprintf(bcb->media.name, "tipc-broadcast");
	strcpy(bcb->bearer.name, bcb->media.name);

	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	__skb_queue_head_init(&bb->arrvq);
	spin_lock_init(&tipc_net(net)->bclock);
	bb->node.net = net;

	if (!tipc_link_bc_create(&bb->node, 0, 0,
				 U16_MAX,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 &bb->namedq,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
	return 0;
enomem:
	kfree(bcb);
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}

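/* tipc_bcast_reinit - update the broadcast send link's message header with
 * this node's own address once it is known
 */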
void tipc_bcast_reinit(struct net *net)
{
	struct tipc_bc_base *b = tipc_bc_base(net);

	msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
}

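/* tipc_bcast_stop - tear down the broadcast link, pseudo-bearer and base
 * structure for this network namespace
 */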
void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
	kfree(tn->bcl);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}