/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "bonding.h"
24 int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
26 struct slave *slave = bond_slave_get_rtnl(slave_dev);
27 const struct aggregator *agg;
29 if (nla_put_u8(skb, IFLA_SLAVE_STATE, bond_slave_state(slave)))
32 if (nla_put_u8(skb, IFLA_SLAVE_MII_STATUS, slave->link))
35 if (nla_put_u32(skb, IFLA_SLAVE_LINK_FAILURE_COUNT,
36 slave->link_failure_count))
39 if (nla_put(skb, IFLA_SLAVE_PERM_HWADDR,
40 slave_dev->addr_len, slave->perm_hwaddr))
43 if (nla_put_u16(skb, IFLA_SLAVE_QUEUE_ID, slave->queue_id))
46 if (slave->bond->params.mode == BOND_MODE_8023AD) {
47 agg = SLAVE_AD_INFO(slave).port.aggregator;
49 if (nla_put_u16(skb, IFLA_SLAVE_AD_AGGREGATOR_ID,
50 agg->aggregator_identifier))
60 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
61 [IFLA_BOND_MODE] = { .type = NLA_U8 },
62 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
63 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
64 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
65 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
66 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
67 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
68 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
69 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
70 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
71 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
72 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
73 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
74 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
75 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
76 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
77 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
78 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
79 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
80 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
81 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
82 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
83 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
86 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
88 if (tb[IFLA_ADDRESS]) {
89 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
92 return -EADDRNOTAVAIL;
97 static int bond_changelink(struct net_device *bond_dev,
98 struct nlattr *tb[], struct nlattr *data[])
100 struct bonding *bond = netdev_priv(bond_dev);
101 struct bond_opt_value newval;
108 if (data[IFLA_BOND_MODE]) {
109 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
111 bond_opt_initval(&newval, mode);
112 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
116 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
117 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
118 struct net_device *slave_dev;
123 slave_dev = __dev_get_by_index(dev_net(bond_dev),
128 err = bond_option_active_slave_set(bond, slave_dev);
132 if (data[IFLA_BOND_MIIMON]) {
133 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
135 err = bond_option_miimon_set(bond, miimon);
139 if (data[IFLA_BOND_UPDELAY]) {
140 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
142 err = bond_option_updelay_set(bond, updelay);
146 if (data[IFLA_BOND_DOWNDELAY]) {
147 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
149 err = bond_option_downdelay_set(bond, downdelay);
153 if (data[IFLA_BOND_USE_CARRIER]) {
154 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
156 err = bond_option_use_carrier_set(bond, use_carrier);
160 if (data[IFLA_BOND_ARP_INTERVAL]) {
161 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
163 if (arp_interval && miimon) {
164 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
169 err = bond_option_arp_interval_set(bond, arp_interval);
173 if (data[IFLA_BOND_ARP_IP_TARGET]) {
174 __be32 targets[BOND_MAX_ARP_TARGETS] = { 0, };
178 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
179 __be32 target = nla_get_be32(attr);
180 targets[i++] = target;
183 err = bond_option_arp_ip_targets_set(bond, targets, i);
187 if (data[IFLA_BOND_ARP_VALIDATE]) {
188 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
190 if (arp_validate && miimon) {
191 pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
196 err = bond_option_arp_validate_set(bond, arp_validate);
200 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
201 int arp_all_targets =
202 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
204 err = bond_option_arp_all_targets_set(bond, arp_all_targets);
208 if (data[IFLA_BOND_PRIMARY]) {
209 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
210 struct net_device *dev;
213 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
217 err = bond_option_primary_set(bond, primary);
221 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
222 int primary_reselect =
223 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
225 err = bond_option_primary_reselect_set(bond, primary_reselect);
229 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
231 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
233 err = bond_option_fail_over_mac_set(bond, fail_over_mac);
237 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
238 int xmit_hash_policy =
239 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
241 bond_opt_initval(&newval, xmit_hash_policy);
242 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
246 if (data[IFLA_BOND_RESEND_IGMP]) {
248 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
250 err = bond_option_resend_igmp_set(bond, resend_igmp);
254 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
256 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
258 err = bond_option_num_peer_notif_set(bond, num_peer_notif);
262 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
263 int all_slaves_active =
264 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
266 err = bond_option_all_slaves_active_set(bond,
271 if (data[IFLA_BOND_MIN_LINKS]) {
273 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
275 err = bond_option_min_links_set(bond, min_links);
279 if (data[IFLA_BOND_LP_INTERVAL]) {
281 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
283 err = bond_option_lp_interval_set(bond, lp_interval);
287 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
288 int packets_per_slave =
289 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
291 bond_opt_initval(&newval, packets_per_slave);
292 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
296 if (data[IFLA_BOND_AD_LACP_RATE]) {
298 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
300 err = bond_option_lacp_rate_set(bond, lacp_rate);
304 if (data[IFLA_BOND_AD_SELECT]) {
306 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
308 err = bond_option_ad_select_set(bond, ad_select);
/* rtnl_link_ops->newlink: apply the requested options, then register the
 * new bond device.  Any option error aborts before registration.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
327 static size_t bond_get_size(const struct net_device *bond_dev)
329 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
330 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
331 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
332 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
333 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
334 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
335 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
336 /* IFLA_BOND_ARP_IP_TARGET */
337 nla_total_size(sizeof(struct nlattr)) +
338 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
339 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
340 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
341 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
342 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
343 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
344 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
345 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
346 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
347 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
348 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
349 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
350 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
351 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
352 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
353 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
354 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
355 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
356 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
357 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
358 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
362 static int bond_fill_info(struct sk_buff *skb,
363 const struct net_device *bond_dev)
365 struct bonding *bond = netdev_priv(bond_dev);
366 struct net_device *slave_dev = bond_option_active_slave_get(bond);
367 struct nlattr *targets;
368 unsigned int packets_per_slave;
369 int i, targets_added;
371 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
372 goto nla_put_failure;
375 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
376 goto nla_put_failure;
378 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
379 goto nla_put_failure;
381 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
382 bond->params.updelay * bond->params.miimon))
383 goto nla_put_failure;
385 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
386 bond->params.downdelay * bond->params.miimon))
387 goto nla_put_failure;
389 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
390 goto nla_put_failure;
392 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
393 goto nla_put_failure;
395 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
397 goto nla_put_failure;
400 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
401 if (bond->params.arp_targets[i]) {
402 nla_put_be32(skb, i, bond->params.arp_targets[i]);
408 nla_nest_end(skb, targets);
410 nla_nest_cancel(skb, targets);
412 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
413 goto nla_put_failure;
415 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
416 bond->params.arp_all_targets))
417 goto nla_put_failure;
419 if (bond->primary_slave &&
420 nla_put_u32(skb, IFLA_BOND_PRIMARY,
421 bond->primary_slave->dev->ifindex))
422 goto nla_put_failure;
424 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
425 bond->params.primary_reselect))
426 goto nla_put_failure;
428 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
429 bond->params.fail_over_mac))
430 goto nla_put_failure;
432 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
433 bond->params.xmit_policy))
434 goto nla_put_failure;
436 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
437 bond->params.resend_igmp))
438 goto nla_put_failure;
440 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
441 bond->params.num_peer_notif))
442 goto nla_put_failure;
444 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
445 bond->params.all_slaves_active))
446 goto nla_put_failure;
448 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
449 bond->params.min_links))
450 goto nla_put_failure;
452 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
453 bond->params.lp_interval))
454 goto nla_put_failure;
456 packets_per_slave = bond->params.packets_per_slave;
457 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
459 goto nla_put_failure;
461 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
462 bond->params.lacp_fast))
463 goto nla_put_failure;
465 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
466 bond->params.ad_select))
467 goto nla_put_failure;
469 if (bond->params.mode == BOND_MODE_8023AD) {
472 if (!bond_3ad_get_active_agg_info(bond, &info)) {
475 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
477 goto nla_put_failure;
479 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
481 goto nla_put_failure;
482 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
484 goto nla_put_failure;
485 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
487 goto nla_put_failure;
488 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
490 goto nla_put_failure;
491 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
492 sizeof(info.partner_system),
493 &info.partner_system))
494 goto nla_put_failure;
496 nla_nest_end(skb, nest);
506 struct rtnl_link_ops bond_link_ops __read_mostly = {
508 .priv_size = sizeof(struct bonding),
510 .maxtype = IFLA_BOND_MAX,
511 .policy = bond_policy,
512 .validate = bond_validate,
513 .newlink = bond_newlink,
514 .changelink = bond_changelink,
515 .get_size = bond_get_size,
516 .fill_info = bond_fill_info,
517 .get_num_tx_queues = bond_get_num_tx_queues,
518 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
522 int __init bond_netlink_init(void)
524 return rtnl_link_register(&bond_link_ops);
527 void bond_netlink_fini(void)
529 rtnl_link_unregister(&bond_link_ops);
532 MODULE_ALIAS_RTNL_LINK("bond");