2 * drivers/net/team/team.c - Network team device driver
3 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/rcupdate.h>
17 #include <linux/errno.h>
18 #include <linux/ctype.h>
19 #include <linux/notifier.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/if_arp.h>
23 #include <linux/socket.h>
24 #include <linux/etherdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <net/rtnetlink.h>
27 #include <net/genetlink.h>
28 #include <net/netlink.h>
29 #include <linux/if_team.h>
31 #define DRV_NAME "team"
38 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
40 static struct team_port *team_port_get_rcu(const struct net_device *dev)
42 struct team_port *port = rcu_dereference(dev->rx_handler_data);
44 return team_port_exists(dev) ? port : NULL;
47 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
49 struct team_port *port = rtnl_dereference(dev->rx_handler_data);
51 return team_port_exists(dev) ? port : NULL;
55 * Since team_port_add tests whether an open port device's mac address can be
56 * changed, this function can be called without checking its return value.
58 static int __set_port_mac(struct net_device *port_dev,
59 const unsigned char *dev_addr)
63 memcpy(addr.sa_data, dev_addr, ETH_ALEN);
64 addr.sa_family = ARPHRD_ETHER;
65 return dev_set_mac_address(port_dev, &addr);
68 static int team_port_set_orig_mac(struct team_port *port)
70 return __set_port_mac(port->dev, port->orig.dev_addr);
73 int team_port_set_team_mac(struct team_port *port)
75 return __set_port_mac(port->dev, port->team->dev->dev_addr);
77 EXPORT_SYMBOL(team_port_set_team_mac);
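/*
 * Illustrative sketch (an assumption about how a mode might use the exported
 * helper above, not code from this file): an active-backup style mode could
 * make a newly activated port carry the team's mac address:
 *
 *	if (new_active_port)
 *		team_port_set_team_mac(new_active_port);
 */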
79 static void team_refresh_port_linkup(struct team_port *port)
81 port->linkup = port->user.linkup_enabled ? port->user.linkup :
90 struct team_option_inst { /* One for each option instance */
91 struct list_head list;
92 struct list_head tmp_list;
93 struct team_option *option;
94 struct team_option_inst_info info;
99 static struct team_option *__team_find_option(struct team *team,
100 const char *opt_name)
102 struct team_option *option;
104 list_for_each_entry(option, &team->option_list, list) {
105 if (strcmp(option->name, opt_name) == 0)
111 static void __team_option_inst_del(struct team_option_inst *opt_inst)
113 list_del(&opt_inst->list);
117 static void __team_option_inst_del_option(struct team *team,
118 struct team_option *option)
120 struct team_option_inst *opt_inst, *tmp;
122 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
123 if (opt_inst->option == option)
124 __team_option_inst_del(opt_inst);
128 static int __team_option_inst_add(struct team *team, struct team_option *option,
129 struct team_port *port)
131 struct team_option_inst *opt_inst;
132 unsigned int array_size;
136 array_size = option->array_size;
138 array_size = 1; /* No array but still need one instance */
140 for (i = 0; i < array_size; i++) {
141 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
144 opt_inst->option = option;
145 opt_inst->info.port = port;
146 opt_inst->info.array_index = i;
147 opt_inst->changed = true;
148 opt_inst->removed = false;
149 list_add_tail(&opt_inst->list, &team->option_inst_list);
151 err = option->init(team, &opt_inst->info);
160 static int __team_option_inst_add_option(struct team *team,
161 struct team_option *option)
163 struct team_port *port;
166 if (!option->per_port) {
167 err = __team_option_inst_add(team, option, NULL);
169 goto inst_del_option;
172 list_for_each_entry(port, &team->port_list, list) {
173 err = __team_option_inst_add(team, option, port);
175 goto inst_del_option;
180 __team_option_inst_del_option(team, option);
184 static void __team_option_inst_mark_removed_option(struct team *team,
185 struct team_option *option)
187 struct team_option_inst *opt_inst;
189 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
190 if (opt_inst->option == option) {
191 opt_inst->changed = true;
192 opt_inst->removed = true;
197 static void __team_option_inst_del_port(struct team *team,
198 struct team_port *port)
200 struct team_option_inst *opt_inst, *tmp;
202 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
203 if (opt_inst->option->per_port &&
204 opt_inst->info.port == port)
205 __team_option_inst_del(opt_inst);
209 static int __team_option_inst_add_port(struct team *team,
210 struct team_port *port)
212 struct team_option *option;
215 list_for_each_entry(option, &team->option_list, list) {
216 if (!option->per_port)
218 err = __team_option_inst_add(team, option, port);
225 __team_option_inst_del_port(team, port);
229 static void __team_option_inst_mark_removed_port(struct team *team,
230 struct team_port *port)
232 struct team_option_inst *opt_inst;
234 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
235 if (opt_inst->info.port == port) {
236 opt_inst->changed = true;
237 opt_inst->removed = true;
242 static int __team_options_register(struct team *team,
243 const struct team_option *option,
247 struct team_option **dst_opts;
250 dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
254 for (i = 0; i < option_count; i++, option++) {
255 if (__team_find_option(team, option->name)) {
259 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
266 for (i = 0; i < option_count; i++) {
267 err = __team_option_inst_add_option(team, dst_opts[i]);
270 list_add_tail(&dst_opts[i]->list, &team->option_list);
277 for (i--; i >= 0; i--)
278 __team_option_inst_del_option(team, dst_opts[i]);
280 i = option_count - 1;
282 for (i--; i >= 0; i--)
289 static void __team_options_mark_removed(struct team *team,
290 const struct team_option *option,
295 for (i = 0; i < option_count; i++, option++) {
296 struct team_option *del_opt;
298 del_opt = __team_find_option(team, option->name);
300 __team_option_inst_mark_removed_option(team, del_opt);
304 static void __team_options_unregister(struct team *team,
305 const struct team_option *option,
310 for (i = 0; i < option_count; i++, option++) {
311 struct team_option *del_opt;
313 del_opt = __team_find_option(team, option->name);
315 __team_option_inst_del_option(team, del_opt);
316 list_del(&del_opt->list);
322 static void __team_options_change_check(struct team *team);
324 int team_options_register(struct team *team,
325 const struct team_option *option,
330 err = __team_options_register(team, option, option_count);
333 __team_options_change_check(team);
336 EXPORT_SYMBOL(team_options_register);
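/*
 * Illustrative sketch (not code from this file): a mode typically registers
 * its own options from its init callback, filling the same struct team_option
 * fields this file uses for the built-in options further below. The option
 * name and callbacks here are made up for illustration:
 *
 *	static const struct team_option example_options[] = {
 *		{
 *			.name	= "example_active_port",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= example_active_port_get,
 *			.setter	= example_active_port_set,
 *		},
 *	};
 *
 *	static int example_init(struct team *team)
 *	{
 *		return team_options_register(team, example_options,
 *					     ARRAY_SIZE(example_options));
 *	}
 */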
338 void team_options_unregister(struct team *team,
339 const struct team_option *option,
342 __team_options_mark_removed(team, option, option_count);
343 __team_options_change_check(team);
344 __team_options_unregister(team, option, option_count);
346 EXPORT_SYMBOL(team_options_unregister);
348 static int team_option_get(struct team *team,
349 struct team_option_inst *opt_inst,
350 struct team_gsetter_ctx *ctx)
352 if (!opt_inst->option->getter)
354 return opt_inst->option->getter(team, ctx);
357 static int team_option_set(struct team *team,
358 struct team_option_inst *opt_inst,
359 struct team_gsetter_ctx *ctx)
361 if (!opt_inst->option->setter)
363 return opt_inst->option->setter(team, ctx);
366 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
368 struct team_option_inst *opt_inst;
370 opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
371 opt_inst->changed = true;
373 EXPORT_SYMBOL(team_option_inst_set_change);
375 void team_options_change_check(struct team *team)
377 __team_options_change_check(team);
379 EXPORT_SYMBOL(team_options_change_check);
386 static LIST_HEAD(mode_list);
387 static DEFINE_SPINLOCK(mode_list_lock);
389 struct team_mode_item {
390 struct list_head list;
391 const struct team_mode *mode;
394 static struct team_mode_item *__find_mode(const char *kind)
396 struct team_mode_item *mitem;
398 list_for_each_entry(mitem, &mode_list, list) {
399 if (strcmp(mitem->mode->kind, kind) == 0)
405 static bool is_good_mode_name(const char *name)
407 while (*name != '\0') {
408 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
415 int team_mode_register(const struct team_mode *mode)
418 struct team_mode_item *mitem;
420 if (!is_good_mode_name(mode->kind) ||
421 mode->priv_size > TEAM_MODE_PRIV_SIZE)
424 mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
428 spin_lock(&mode_list_lock);
429 if (__find_mode(mode->kind)) {
435 list_add_tail(&mitem->list, &mode_list);
437 spin_unlock(&mode_list_lock);
440 EXPORT_SYMBOL(team_mode_register);
442 void team_mode_unregister(const struct team_mode *mode)
444 struct team_mode_item *mitem;
446 spin_lock(&mode_list_lock);
447 mitem = __find_mode(mode->kind);
449 list_del_init(&mitem->list);
452 spin_unlock(&mode_list_lock);
454 EXPORT_SYMBOL(team_mode_unregister);
456 static const struct team_mode *team_mode_get(const char *kind)
458 struct team_mode_item *mitem;
459 const struct team_mode *mode = NULL;
461 spin_lock(&mode_list_lock);
462 mitem = __find_mode(kind);
464 spin_unlock(&mode_list_lock);
465 request_module("team-mode-%s", kind);
466 spin_lock(&mode_list_lock);
467 mitem = __find_mode(kind);
471 if (!try_module_get(mode->owner))
475 spin_unlock(&mode_list_lock);
479 static void team_mode_put(const struct team_mode *mode)
481 module_put(mode->owner);
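/*
 * Illustrative sketch (not code from this file): each mode lives in its own
 * module, fills in a struct team_mode and registers it on load. The
 * "team-mode-<kind>" module alias is what team_mode_get() above relies on
 * when it calls request_module(). Names prefixed "example_" are made up:
 *
 *	static const struct team_mode_ops example_mode_ops = {
 *		.transmit	= example_transmit,
 *		.receive	= example_receive,
 *	};
 *
 *	static const struct team_mode example_mode = {
 *		.kind		= "example",
 *		.owner		= THIS_MODULE,
 *		.ops		= &example_mode_ops,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return team_mode_register(&example_mode);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		team_mode_unregister(&example_mode);
 *	}
 *
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 *	MODULE_ALIAS("team-mode-example");
 */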
484 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
486 dev_kfree_skb_any(skb);
490 rx_handler_result_t team_dummy_receive(struct team *team,
491 struct team_port *port,
494 return RX_HANDLER_ANOTHER;
497 static const struct team_mode __team_no_mode = {
501 static bool team_is_mode_set(struct team *team)
503 return team->mode != &__team_no_mode;
506 static void team_set_no_mode(struct team *team)
508 team->mode = &__team_no_mode;
511 static void __team_adjust_ops(struct team *team, int en_port_count)
514 * To avoid checks in rx/tx skb paths, ensure here that non-null and
515 * correct ops are always set.
518 if (!en_port_count || !team_is_mode_set(team) ||
519 !team->mode->ops->transmit)
520 team->ops.transmit = team_dummy_transmit;
522 team->ops.transmit = team->mode->ops->transmit;
524 if (!en_port_count || !team_is_mode_set(team) ||
525 !team->mode->ops->receive)
526 team->ops.receive = team_dummy_receive;
528 team->ops.receive = team->mode->ops->receive;
531 static void team_adjust_ops(struct team *team)
533 __team_adjust_ops(team, team->en_port_count);
537 * We can rely on the fact that no port is present at the time of a mode
538 * change. Therefore no packets are in flight, so there is no need to set
539 * the mode operations in any special way.
541 static int __team_change_mode(struct team *team,
542 const struct team_mode *new_mode)
544 /* Check if mode was previously set and do cleanup if so */
545 if (team_is_mode_set(team)) {
546 void (*exit_op)(struct team *team) = team->ops.exit;
548 /* Clear ops area so no callback is called any longer */
549 memset(&team->ops, 0, sizeof(struct team_mode_ops));
550 team_adjust_ops(team);
554 team_mode_put(team->mode);
555 team_set_no_mode(team);
556 /* zero private data area */
557 memset(&team->mode_priv, 0,
558 sizeof(struct team) - offsetof(struct team, mode_priv));
564 if (new_mode->ops->init) {
567 err = new_mode->ops->init(team);
572 team->mode = new_mode;
573 memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
574 team_adjust_ops(team);
579 static int team_change_mode(struct team *team, const char *kind)
581 const struct team_mode *new_mode;
582 struct net_device *dev = team->dev;
585 if (!list_empty(&team->port_list)) {
586 netdev_err(dev, "No ports can be present during mode change\n");
590 if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
591 netdev_err(dev, "Unable to change to the same mode the team is in\n");
595 new_mode = team_mode_get(kind);
597 netdev_err(dev, "Mode \"%s\" not found\n", kind);
601 err = __team_change_mode(team, new_mode);
603 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
604 team_mode_put(new_mode);
608 netdev_info(dev, "Mode changed to \"%s\"\n", kind);
613 /************************
614 * Rx path frame handler
615 ************************/
617 /* note: already called with rcu_read_lock */
618 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
620 struct sk_buff *skb = *pskb;
621 struct team_port *port;
623 rx_handler_result_t res;
625 skb = skb_share_check(skb, GFP_ATOMIC);
627 return RX_HANDLER_CONSUMED;
631 port = team_port_get_rcu(skb->dev);
633 if (!team_port_enabled(port)) {
634 /* allow exact match delivery for disabled ports */
635 res = RX_HANDLER_EXACT;
637 res = team->ops.receive(team, port, skb);
639 if (res == RX_HANDLER_ANOTHER) {
640 struct team_pcpu_stats *pcpu_stats;
642 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
643 u64_stats_update_begin(&pcpu_stats->syncp);
644 pcpu_stats->rx_packets++;
645 pcpu_stats->rx_bytes += skb->len;
646 if (skb->pkt_type == PACKET_MULTICAST)
647 pcpu_stats->rx_multicast++;
648 u64_stats_update_end(&pcpu_stats->syncp);
650 skb->dev = team->dev;
652 this_cpu_inc(team->pcpu_stats->rx_dropped);
663 static bool team_port_find(const struct team *team,
664 const struct team_port *port)
666 struct team_port *cur;
668 list_for_each_entry(cur, &team->port_list, list)
674 bool team_port_enabled(struct team_port *port)
676 return port->index != -1;
678 EXPORT_SYMBOL(team_port_enabled);
681 * Enable/disable a port by adding it to the enabled-port hashlist and setting
682 * port->index (might be racy, so a reader could see an incorrect ifindex when
683 * processing an in-flight packet, but that is not a problem). Writes are guarded by team->lock.
686 static void team_port_enable(struct team *team,
687 struct team_port *port)
689 if (team_port_enabled(port))
691 port->index = team->en_port_count++;
692 hlist_add_head_rcu(&port->hlist,
693 team_port_index_hash(team, port->index));
694 team_adjust_ops(team);
695 if (team->ops.port_enabled)
696 team->ops.port_enabled(team, port);
699 static void __reconstruct_port_hlist(struct team *team, int rm_index)
702 struct team_port *port;
704 for (i = rm_index + 1; i < team->en_port_count; i++) {
705 port = team_get_port_by_index(team, i);
706 hlist_del_rcu(&port->hlist);
708 hlist_add_head_rcu(&port->hlist,
709 team_port_index_hash(team, port->index));
713 static void team_port_disable(struct team *team,
714 struct team_port *port)
716 if (!team_port_enabled(port))
718 if (team->ops.port_disabled)
719 team->ops.port_disabled(team, port);
720 hlist_del_rcu(&port->hlist);
721 __reconstruct_port_hlist(team, port->index);
723 __team_adjust_ops(team, team->en_port_count - 1);
725 * Wait until readers see adjusted ops. This ensures that
726 * readers never see team->en_port_count == 0
729 team->en_port_count--;
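/*
 * Illustrative sketch (assumption: team_get_port_by_index_rcu() is the
 * RCU-safe counterpart, declared in <linux/if_team.h>, of the lookup helper
 * used above): the enabled-port index hash exists so a mode's transmit path
 * can pick an egress port in O(1), e.g. in a round-robin fashion:
 *
 *	static bool example_transmit(struct team *team, struct sk_buff *skb)
 *	{
 *		struct team_port *port;
 *		int idx;
 *
 *		idx = example_next_index(team) % team->en_port_count;
 *		port = team_get_port_by_index_rcu(team, idx);
 *		if (unlikely(!port))
 *			goto drop;
 *		skb->dev = port->dev;
 *		if (dev_queue_xmit(skb))
 *			goto drop;
 *		return true;
 *
 *	drop:
 *		dev_kfree_skb_any(skb);
 *		return false;
 *	}
 *
 * example_next_index() stands in for whatever per-mode counter the mode keeps
 * in its private data.
 */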
732 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
733 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
734 NETIF_F_HIGHDMA | NETIF_F_LRO)
736 static void __team_compute_features(struct team *team)
738 struct team_port *port;
739 u32 vlan_features = TEAM_VLAN_FEATURES;
740 unsigned short max_hard_header_len = ETH_HLEN;
742 list_for_each_entry(port, &team->port_list, list) {
743 vlan_features = netdev_increment_features(vlan_features,
744 port->dev->vlan_features,
747 if (port->dev->hard_header_len > max_hard_header_len)
748 max_hard_header_len = port->dev->hard_header_len;
751 team->dev->vlan_features = vlan_features;
752 team->dev->hard_header_len = max_hard_header_len;
754 netdev_change_features(team->dev);
757 static void team_compute_features(struct team *team)
759 mutex_lock(&team->lock);
760 __team_compute_features(team);
761 mutex_unlock(&team->lock);
764 static int team_port_enter(struct team *team, struct team_port *port)
769 port->dev->priv_flags |= IFF_TEAM_PORT;
770 if (team->ops.port_enter) {
771 err = team->ops.port_enter(team, port);
773 netdev_err(team->dev, "Device %s failed to enter team mode\n",
782 port->dev->priv_flags &= ~IFF_TEAM_PORT;
788 static void team_port_leave(struct team *team, struct team_port *port)
790 if (team->ops.port_leave)
791 team->ops.port_leave(team, port);
792 port->dev->priv_flags &= ~IFF_TEAM_PORT;
796 static void __team_port_change_check(struct team_port *port, bool linkup);
798 static int team_port_add(struct team *team, struct net_device *port_dev)
800 struct net_device *dev = team->dev;
801 struct team_port *port;
802 char *portname = port_dev->name;
805 if (port_dev->flags & IFF_LOOPBACK ||
806 port_dev->type != ARPHRD_ETHER) {
807 netdev_err(dev, "Device %s is of an unsupported type\n",
812 if (team_port_exists(port_dev)) {
813 netdev_err(dev, "Device %s is already a port "
814 "of a team device\n", portname);
818 if (port_dev->flags & IFF_UP) {
819 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
824 port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
829 port->dev = port_dev;
832 port->orig.mtu = port_dev->mtu;
833 err = dev_set_mtu(port_dev, dev->mtu);
835 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
839 memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
841 err = team_port_enter(team, port);
843 netdev_err(dev, "Device %s failed to enter team mode\n",
848 err = dev_open(port_dev);
850 netdev_dbg(dev, "Device %s opening failed\n",
855 err = vlan_vids_add_by_dev(port_dev, dev);
857 netdev_err(dev, "Failed to add vlan ids to device %s\n",
862 err = netdev_set_master(port_dev, dev);
864 netdev_err(dev, "Device %s failed to set master\n", portname);
868 err = netdev_rx_handler_register(port_dev, team_handle_frame,
871 netdev_err(dev, "Device %s failed to register rx_handler\n",
873 goto err_handler_register;
876 err = __team_option_inst_add_port(team, port);
878 netdev_err(dev, "Device %s failed to add per-port options\n",
880 goto err_option_port_add;
884 team_port_enable(team, port);
885 list_add_tail_rcu(&port->list, &team->port_list);
886 __team_compute_features(team);
887 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
888 __team_options_change_check(team);
890 netdev_info(dev, "Port device %s added\n", portname);
895 netdev_rx_handler_unregister(port_dev);
897 err_handler_register:
898 netdev_set_master(port_dev, NULL);
901 vlan_vids_del_by_dev(port_dev, dev);
907 team_port_leave(team, port);
908 team_port_set_orig_mac(port);
911 dev_set_mtu(port_dev, port->orig.mtu);
919 static int team_port_del(struct team *team, struct net_device *port_dev)
921 struct net_device *dev = team->dev;
922 struct team_port *port;
923 char *portname = port_dev->name;
925 port = team_port_get_rtnl(port_dev);
926 if (!port || !team_port_find(team, port)) {
927 netdev_err(dev, "Device %s does not act as a port of this team\n",
932 __team_option_inst_mark_removed_port(team, port);
933 __team_options_change_check(team);
934 __team_option_inst_del_port(team, port);
935 port->removed = true;
936 __team_port_change_check(port, false);
937 team_port_disable(team, port);
938 list_del_rcu(&port->list);
939 netdev_rx_handler_unregister(port_dev);
940 netdev_set_master(port_dev, NULL);
941 vlan_vids_del_by_dev(port_dev, dev);
943 team_port_leave(team, port);
944 team_port_set_orig_mac(port);
945 dev_set_mtu(port_dev, port->orig.mtu);
948 netdev_info(dev, "Port device %s removed\n", portname);
949 __team_compute_features(team);
959 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
961 ctx->data.str_val = team->mode->kind;
965 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
967 return team_change_mode(team, ctx->data.str_val);
970 static int team_port_en_option_get(struct team *team,
971 struct team_gsetter_ctx *ctx)
973 struct team_port *port = ctx->info->port;
975 ctx->data.bool_val = team_port_enabled(port);
979 static int team_port_en_option_set(struct team *team,
980 struct team_gsetter_ctx *ctx)
982 struct team_port *port = ctx->info->port;
984 if (ctx->data.bool_val)
985 team_port_enable(team, port);
987 team_port_disable(team, port);
991 static int team_user_linkup_option_get(struct team *team,
992 struct team_gsetter_ctx *ctx)
994 struct team_port *port = ctx->info->port;
996 ctx->data.bool_val = port->user.linkup;
1000 static int team_user_linkup_option_set(struct team *team,
1001 struct team_gsetter_ctx *ctx)
1003 struct team_port *port = ctx->info->port;
1005 port->user.linkup = ctx->data.bool_val;
1006 team_refresh_port_linkup(port);
1010 static int team_user_linkup_en_option_get(struct team *team,
1011 struct team_gsetter_ctx *ctx)
1013 struct team_port *port = ctx->info->port;
1015 ctx->data.bool_val = port->user.linkup_enabled;
1019 static int team_user_linkup_en_option_set(struct team *team,
1020 struct team_gsetter_ctx *ctx)
1022 struct team_port *port = ctx->info->port;
1024 port->user.linkup_enabled = ctx->data.bool_val;
1025 team_refresh_port_linkup(port);
1029 static const struct team_option team_options[] = {
1032 .type = TEAM_OPTION_TYPE_STRING,
1033 .getter = team_mode_option_get,
1034 .setter = team_mode_option_set,
1038 .type = TEAM_OPTION_TYPE_BOOL,
1040 .getter = team_port_en_option_get,
1041 .setter = team_port_en_option_set,
1044 .name = "user_linkup",
1045 .type = TEAM_OPTION_TYPE_BOOL,
1047 .getter = team_user_linkup_option_get,
1048 .setter = team_user_linkup_option_set,
1051 .name = "user_linkup_enabled",
1052 .type = TEAM_OPTION_TYPE_BOOL,
1054 .getter = team_user_linkup_en_option_get,
1055 .setter = team_user_linkup_en_option_set,
1059 static int team_init(struct net_device *dev)
1061 struct team *team = netdev_priv(dev);
1066 mutex_init(&team->lock);
1067 team_set_no_mode(team);
1069 team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
1070 if (!team->pcpu_stats)
1073 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1074 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1075 INIT_LIST_HEAD(&team->port_list);
1077 team_adjust_ops(team);
1079 INIT_LIST_HEAD(&team->option_list);
1080 INIT_LIST_HEAD(&team->option_inst_list);
1081 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1083 goto err_options_register;
1084 netif_carrier_off(dev);
1088 err_options_register:
1089 free_percpu(team->pcpu_stats);
1094 static void team_uninit(struct net_device *dev)
1096 struct team *team = netdev_priv(dev);
1097 struct team_port *port;
1098 struct team_port *tmp;
1100 mutex_lock(&team->lock);
1101 list_for_each_entry_safe(port, tmp, &team->port_list, list)
1102 team_port_del(team, port->dev);
1104 __team_change_mode(team, NULL); /* cleanup */
1105 __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1106 mutex_unlock(&team->lock);
1109 static void team_destructor(struct net_device *dev)
1111 struct team *team = netdev_priv(dev);
1113 free_percpu(team->pcpu_stats);
1117 static int team_open(struct net_device *dev)
1119 netif_carrier_on(dev);
1123 static int team_close(struct net_device *dev)
1125 netif_carrier_off(dev);
1130 * note: already called with rcu_read_lock
1132 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1134 struct team *team = netdev_priv(dev);
1135 bool tx_success = false;
1136 unsigned int len = skb->len;
1138 tx_success = team->ops.transmit(team, skb);
1140 struct team_pcpu_stats *pcpu_stats;
1142 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1143 u64_stats_update_begin(&pcpu_stats->syncp);
1144 pcpu_stats->tx_packets++;
1145 pcpu_stats->tx_bytes += len;
1146 u64_stats_update_end(&pcpu_stats->syncp);
1148 this_cpu_inc(team->pcpu_stats->tx_dropped);
1151 return NETDEV_TX_OK;
1154 static void team_change_rx_flags(struct net_device *dev, int change)
1156 struct team *team = netdev_priv(dev);
1157 struct team_port *port;
1161 list_for_each_entry_rcu(port, &team->port_list, list) {
1162 if (change & IFF_PROMISC) {
1163 inc = dev->flags & IFF_PROMISC ? 1 : -1;
1164 dev_set_promiscuity(port->dev, inc);
1166 if (change & IFF_ALLMULTI) {
1167 inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1168 dev_set_allmulti(port->dev, inc);
1174 static void team_set_rx_mode(struct net_device *dev)
1176 struct team *team = netdev_priv(dev);
1177 struct team_port *port;
1180 list_for_each_entry_rcu(port, &team->port_list, list) {
1181 dev_uc_sync(port->dev, dev);
1182 dev_mc_sync(port->dev, dev);
1187 static int team_set_mac_address(struct net_device *dev, void *p)
1189 struct team *team = netdev_priv(dev);
1190 struct team_port *port;
1191 struct sockaddr *addr = p;
1193 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1194 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1196 list_for_each_entry_rcu(port, &team->port_list, list)
1197 if (team->ops.port_change_mac)
1198 team->ops.port_change_mac(team, port);
1203 static int team_change_mtu(struct net_device *dev, int new_mtu)
1205 struct team *team = netdev_priv(dev);
1206 struct team_port *port;
1210 * Although this is a reader, it is guarded by team->lock. It is not possible
1211 * to traverse the list in reverse under rcu_read_lock.
1213 mutex_lock(&team->lock);
1214 list_for_each_entry(port, &team->port_list, list) {
1215 err = dev_set_mtu(port->dev, new_mtu);
1217 netdev_err(dev, "Device %s failed to change mtu\n",
1222 mutex_unlock(&team->lock);
1229 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1230 dev_set_mtu(port->dev, dev->mtu);
1231 mutex_unlock(&team->lock);
1236 static struct rtnl_link_stats64 *
1237 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1239 struct team *team = netdev_priv(dev);
1240 struct team_pcpu_stats *p;
1241 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1242 u32 rx_dropped = 0, tx_dropped = 0;
1246 for_each_possible_cpu(i) {
1247 p = per_cpu_ptr(team->pcpu_stats, i);
1249 start = u64_stats_fetch_begin_bh(&p->syncp);
1250 rx_packets = p->rx_packets;
1251 rx_bytes = p->rx_bytes;
1252 rx_multicast = p->rx_multicast;
1253 tx_packets = p->tx_packets;
1254 tx_bytes = p->tx_bytes;
1255 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
1257 stats->rx_packets += rx_packets;
1258 stats->rx_bytes += rx_bytes;
1259 stats->multicast += rx_multicast;
1260 stats->tx_packets += tx_packets;
1261 stats->tx_bytes += tx_bytes;
1263 * rx_dropped & tx_dropped are u32, updated
1264 * without syncp protection.
1266 rx_dropped += p->rx_dropped;
1267 tx_dropped += p->tx_dropped;
1269 stats->rx_dropped = rx_dropped;
1270 stats->tx_dropped = tx_dropped;
1274 static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
1276 struct team *team = netdev_priv(dev);
1277 struct team_port *port;
1281 * Although this is a reader, it is guarded by team->lock. It is not possible
1282 * to traverse the list in reverse under rcu_read_lock.
1284 mutex_lock(&team->lock);
1285 list_for_each_entry(port, &team->port_list, list) {
1286 err = vlan_vid_add(port->dev, vid);
1290 mutex_unlock(&team->lock);
1295 list_for_each_entry_continue_reverse(port, &team->port_list, list)
1296 vlan_vid_del(port->dev, vid);
1297 mutex_unlock(&team->lock);
1302 static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
1304 struct team *team = netdev_priv(dev);
1305 struct team_port *port;
1308 list_for_each_entry_rcu(port, &team->port_list, list)
1309 vlan_vid_del(port->dev, vid);
1315 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1317 struct team *team = netdev_priv(dev);
1320 mutex_lock(&team->lock);
1321 err = team_port_add(team, port_dev);
1322 mutex_unlock(&team->lock);
1326 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1328 struct team *team = netdev_priv(dev);
1331 mutex_lock(&team->lock);
1332 err = team_port_del(team, port_dev);
1333 mutex_unlock(&team->lock);
1337 static netdev_features_t team_fix_features(struct net_device *dev,
1338 netdev_features_t features)
1340 struct team_port *port;
1341 struct team *team = netdev_priv(dev);
1342 netdev_features_t mask;
1345 features &= ~NETIF_F_ONE_FOR_ALL;
1346 features |= NETIF_F_ALL_FOR_ALL;
1349 list_for_each_entry_rcu(port, &team->port_list, list) {
1350 features = netdev_increment_features(features,
1351 port->dev->features,
1358 static const struct net_device_ops team_netdev_ops = {
1359 .ndo_init = team_init,
1360 .ndo_uninit = team_uninit,
1361 .ndo_open = team_open,
1362 .ndo_stop = team_close,
1363 .ndo_start_xmit = team_xmit,
1364 .ndo_change_rx_flags = team_change_rx_flags,
1365 .ndo_set_rx_mode = team_set_rx_mode,
1366 .ndo_set_mac_address = team_set_mac_address,
1367 .ndo_change_mtu = team_change_mtu,
1368 .ndo_get_stats64 = team_get_stats64,
1369 .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
1370 .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
1371 .ndo_add_slave = team_add_slave,
1372 .ndo_del_slave = team_del_slave,
1373 .ndo_fix_features = team_fix_features,
1377 /***********************
1378 * rt netlink interface
1379 ***********************/
1381 static void team_setup(struct net_device *dev)
1385 dev->netdev_ops = &team_netdev_ops;
1386 dev->destructor = team_destructor;
1387 dev->tx_queue_len = 0;
1388 dev->flags |= IFF_MULTICAST;
1389 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
1392 * Indicate that we support unicast address filtering. That way the core
1393 * won't bring us into promiscuous mode when a unicast address is added;
1394 * leave that to the underlying drivers.
1396 dev->priv_flags |= IFF_UNICAST_FLT;
1398 dev->features |= NETIF_F_LLTX;
1399 dev->features |= NETIF_F_GRO;
1400 dev->hw_features = NETIF_F_HW_VLAN_TX |
1401 NETIF_F_HW_VLAN_RX |
1402 NETIF_F_HW_VLAN_FILTER;
1404 dev->features |= dev->hw_features;
1407 static int team_newlink(struct net *src_net, struct net_device *dev,
1408 struct nlattr *tb[], struct nlattr *data[])
1412 if (tb[IFLA_ADDRESS] == NULL)
1413 eth_hw_addr_random(dev);
1415 err = register_netdevice(dev);
1422 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
1424 if (tb[IFLA_ADDRESS]) {
1425 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1427 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1428 return -EADDRNOTAVAIL;
1433 static struct rtnl_link_ops team_link_ops __read_mostly = {
1435 .priv_size = sizeof(struct team),
1436 .setup = team_setup,
1437 .newlink = team_newlink,
1438 .validate = team_validate,
1442 /***********************************
1443 * Generic netlink custom interface
1444 ***********************************/
1446 static struct genl_family team_nl_family = {
1447 .id = GENL_ID_GENERATE,
1448 .name = TEAM_GENL_NAME,
1449 .version = TEAM_GENL_VERSION,
1450 .maxattr = TEAM_ATTR_MAX,
1454 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
1455 [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
1456 [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
1457 [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
1458 [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
1461 static const struct nla_policy
1462 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1463 [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
1464 [TEAM_ATTR_OPTION_NAME] = {
1466 .len = TEAM_STRING_MAX_LEN,
1468 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1469 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1470 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1473 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1475 struct sk_buff *msg;
1479 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1483 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
1484 &team_nl_family, 0, TEAM_CMD_NOOP);
1490 genlmsg_end(msg, hdr);
1492 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
1501 * Netlink cmd functions should be guarded by the following two functions.
1502 * Since the device reference is held here, the device cannot disappear in between.
1504 static struct team *team_nl_team_get(struct genl_info *info)
1506 struct net *net = genl_info_net(info);
1508 struct net_device *dev;
1511 if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
1514 ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
1515 dev = dev_get_by_index(net, ifindex);
1516 if (!dev || dev->netdev_ops != &team_netdev_ops) {
1522 team = netdev_priv(dev);
1523 mutex_lock(&team->lock);
1527 static void team_nl_team_put(struct team *team)
1529 mutex_unlock(&team->lock);
1533 static int team_nl_send_generic(struct genl_info *info, struct team *team,
1534 int (*fill_func)(struct sk_buff *skb,
1535 struct genl_info *info,
1536 int flags, struct team *team))
1538 struct sk_buff *skb;
1541 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1545 err = fill_func(skb, info, NLM_F_ACK, team);
1549 err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
1557 typedef int team_nl_send_func_t(struct sk_buff *skb,
1558 struct team *team, u32 pid);
1560 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
1562 return genlmsg_unicast(dev_net(team->dev), skb, pid);
1565 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
1566 struct team_option_inst *opt_inst)
1568 struct nlattr *option_item;
1569 struct team_option *option = opt_inst->option;
1570 struct team_option_inst_info *opt_inst_info = &opt_inst->info;
1571 struct team_gsetter_ctx ctx;
1574 ctx.info = opt_inst_info;
1575 err = team_option_get(team, opt_inst, &ctx);
1579 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1583 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1585 if (opt_inst_info->port &&
1586 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1587 opt_inst_info->port->dev->ifindex))
1589 if (opt_inst->option->array_size &&
1590 nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
1591 opt_inst_info->array_index))
1594 switch (option->type) {
1595 case TEAM_OPTION_TYPE_U32:
1596 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1598 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
1601 case TEAM_OPTION_TYPE_STRING:
1602 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1604 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1608 case TEAM_OPTION_TYPE_BINARY:
1609 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1611 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
1612 ctx.data.bin_val.ptr))
1615 case TEAM_OPTION_TYPE_BOOL:
1616 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1618 if (ctx.data.bool_val &&
1619 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1625 if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1627 if (opt_inst->changed) {
1628 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1630 opt_inst->changed = false;
1632 nla_nest_end(skb, option_item);
1636 nla_nest_cancel(skb, option_item);
1640 static int __send_and_alloc_skb(struct sk_buff **pskb,
1641 struct team *team, u32 pid,
1642 team_nl_send_func_t *send_func)
1647 err = send_func(*pskb, team, pid);
1651 *pskb = genlmsg_new(NLMSG_DEFAULT_SIZE - GENL_HDRLEN, GFP_KERNEL);
1657 static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
1658 int flags, team_nl_send_func_t *send_func,
1659 struct list_head *sel_opt_inst_list)
1661 struct nlattr *option_list;
1662 struct nlmsghdr *nlh;
1664 struct team_option_inst *opt_inst;
1666 struct sk_buff *skb = NULL;
1670 opt_inst = list_first_entry(sel_opt_inst_list,
1671 struct team_option_inst, tmp_list);
1674 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1678 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
1679 TEAM_CMD_OPTIONS_GET);
1681 return PTR_ERR(hdr);
1683 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1684 goto nla_put_failure;
1685 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1687 goto nla_put_failure;
1691 list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
1692 err = team_nl_fill_one_option_get(skb, team, opt_inst);
1694 if (err == -EMSGSIZE) {
1705 nla_nest_end(skb, option_list);
1706 genlmsg_end(skb, hdr);
1711 nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
1713 err = __send_and_alloc_skb(&skb, team, pid, send_func);
1719 return send_func(skb, team, pid);
1724 genlmsg_cancel(skb, hdr);
1729 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
1732 struct team_option_inst *opt_inst;
1734 LIST_HEAD(sel_opt_inst_list);
1736 team = team_nl_team_get(info);
1740 list_for_each_entry(opt_inst, &team->option_inst_list, list)
1741 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
1742 err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
1743 NLM_F_ACK, team_nl_send_unicast,
1744 &sel_opt_inst_list);
1746 team_nl_team_put(team);
1751 static int team_nl_send_event_options_get(struct team *team,
1752 struct list_head *sel_opt_inst_list);
1754 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1759 struct nlattr *nl_option;
1760 LIST_HEAD(opt_inst_list);
1762 team = team_nl_team_get(info);
1767 if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
1772 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1773 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1774 struct nlattr *attr;
1775 struct nlattr *attr_data;
1776 enum team_option_type opt_type;
1777 int opt_port_ifindex = 0; /* != 0 for per-port options */
1778 u32 opt_array_index = 0;
1779 bool opt_is_array = false;
1780 struct team_option_inst *opt_inst;
1782 bool opt_found = false;
1784 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
1788 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1789 nl_option, team_nl_option_policy);
1792 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1793 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1797 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1799 opt_type = TEAM_OPTION_TYPE_U32;
1802 opt_type = TEAM_OPTION_TYPE_STRING;
1805 opt_type = TEAM_OPTION_TYPE_BINARY;
1808 opt_type = TEAM_OPTION_TYPE_BOOL;
1814 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1815 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1820 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1821 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1823 opt_port_ifindex = nla_get_u32(attr);
1825 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
1827 opt_is_array = true;
1828 opt_array_index = nla_get_u32(attr);
1831 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1832 struct team_option *option = opt_inst->option;
1833 struct team_gsetter_ctx ctx;
1834 struct team_option_inst_info *opt_inst_info;
1837 opt_inst_info = &opt_inst->info;
1838 tmp_ifindex = opt_inst_info->port ?
1839 opt_inst_info->port->dev->ifindex : 0;
1840 if (option->type != opt_type ||
1841 strcmp(option->name, opt_name) ||
1842 tmp_ifindex != opt_port_ifindex ||
1843 (option->array_size && !opt_is_array) ||
1844 opt_inst_info->array_index != opt_array_index)
1847 ctx.info = opt_inst_info;
1849 case TEAM_OPTION_TYPE_U32:
1850 ctx.data.u32_val = nla_get_u32(attr_data);
1852 case TEAM_OPTION_TYPE_STRING:
1853 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1857 ctx.data.str_val = nla_data(attr_data);
1859 case TEAM_OPTION_TYPE_BINARY:
1860 ctx.data.bin_val.len = nla_len(attr_data);
1861 ctx.data.bin_val.ptr = nla_data(attr_data);
1863 case TEAM_OPTION_TYPE_BOOL:
1864 ctx.data.bool_val = attr_data ? true : false;
1869 err = team_option_set(team, opt_inst, &ctx);
1872 opt_inst->changed = true;
1873 list_add(&opt_inst->tmp_list, &opt_inst_list);
1881 err = team_nl_send_event_options_get(team, &opt_inst_list);
1884 team_nl_team_put(team);
1889 static int team_nl_fill_port_list_get(struct sk_buff *skb,
1890 u32 pid, u32 seq, int flags,
1894 struct nlattr *port_list;
1896 struct team_port *port;
1898 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1899 TEAM_CMD_PORT_LIST_GET);
1901 return PTR_ERR(hdr);
1903 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1904 goto nla_put_failure;
1905 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1907 goto nla_put_failure;
1909 list_for_each_entry(port, &team->port_list, list) {
1910 struct nlattr *port_item;
1912 /* Include only changed ports unless fill-all mode is on */
1913 if (!fillall && !port->changed)
1915 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1917 goto nla_put_failure;
1918 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1919 goto nla_put_failure;
1920 if (port->changed) {
1921 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1922 goto nla_put_failure;
1923 port->changed = false;
1925 if ((port->removed &&
1926 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1927 (port->state.linkup &&
1928 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1929 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1930 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1931 goto nla_put_failure;
1932 nla_nest_end(skb, port_item);
1935 nla_nest_end(skb, port_list);
1936 return genlmsg_end(skb, hdr);
1939 genlmsg_cancel(skb, hdr);
1943 static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
1944 struct genl_info *info, int flags,
1947 return team_nl_fill_port_list_get(skb, info->snd_pid,
1948 info->snd_seq, NLM_F_ACK,
1952 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
1953 struct genl_info *info)
1958 team = team_nl_team_get(info);
1962 err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
1964 team_nl_team_put(team);
1969 static struct genl_ops team_nl_ops[] = {
1971 .cmd = TEAM_CMD_NOOP,
1972 .doit = team_nl_cmd_noop,
1973 .policy = team_nl_policy,
1976 .cmd = TEAM_CMD_OPTIONS_SET,
1977 .doit = team_nl_cmd_options_set,
1978 .policy = team_nl_policy,
1979 .flags = GENL_ADMIN_PERM,
1982 .cmd = TEAM_CMD_OPTIONS_GET,
1983 .doit = team_nl_cmd_options_get,
1984 .policy = team_nl_policy,
1985 .flags = GENL_ADMIN_PERM,
1988 .cmd = TEAM_CMD_PORT_LIST_GET,
1989 .doit = team_nl_cmd_port_list_get,
1990 .policy = team_nl_policy,
1991 .flags = GENL_ADMIN_PERM,
1995 static struct genl_multicast_group team_change_event_mcgrp = {
1996 .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
1999 static int team_nl_send_multicast(struct sk_buff *skb,
2000 struct team *team, u32 pid)
2002 return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
2003 team_change_event_mcgrp.id, GFP_KERNEL);
2006 static int team_nl_send_event_options_get(struct team *team,
2007 struct list_head *sel_opt_inst_list)
2009 return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2013 static int team_nl_send_event_port_list_get(struct team *team)
2015 struct sk_buff *skb;
2017 struct net *net = dev_net(team->dev);
2019 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2023 err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
2027 err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
2036 static int team_nl_init(void)
2040 err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
2041 ARRAY_SIZE(team_nl_ops));
2045 err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
2047 goto err_change_event_grp_reg;
2051 err_change_event_grp_reg:
2052 genl_unregister_family(&team_nl_family);
2057 static void team_nl_fini(void)
2059 genl_unregister_family(&team_nl_family);
2067 static void __team_options_change_check(struct team *team)
2070 struct team_option_inst *opt_inst;
2071 LIST_HEAD(sel_opt_inst_list);
2073 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2074 if (opt_inst->changed)
2075 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2077 err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2079 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2083 /* rtnl lock is held */
2084 static void __team_port_change_check(struct team_port *port, bool linkup)
2088 if (!port->removed && port->state.linkup == linkup)
2091 port->changed = true;
2092 port->state.linkup = linkup;
2093 team_refresh_port_linkup(port);
2095 struct ethtool_cmd ecmd;
2097 err = __ethtool_get_settings(port->dev, &ecmd);
2099 port->state.speed = ethtool_cmd_speed(&ecmd);
2100 port->state.duplex = ecmd.duplex;
2104 port->state.speed = 0;
2105 port->state.duplex = 0;
2108 err = team_nl_send_event_port_list_get(port->team);
2110 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
2115 static void team_port_change_check(struct team_port *port, bool linkup)
2117 struct team *team = port->team;
2119 mutex_lock(&team->lock);
2120 __team_port_change_check(port, linkup);
2121 mutex_unlock(&team->lock);
2125 /************************************
2126 * Net device notifier event handler
2127 ************************************/
2129 static int team_device_event(struct notifier_block *unused,
2130 unsigned long event, void *ptr)
2132 struct net_device *dev = (struct net_device *) ptr;
2133 struct team_port *port;
2135 port = team_port_get_rtnl(dev);
2141 if (netif_carrier_ok(dev))
2142 team_port_change_check(port, true);
2144 team_port_change_check(port, false);
2146 if (netif_running(port->dev))
2147 team_port_change_check(port,
2148 !!netif_carrier_ok(port->dev));
2150 case NETDEV_UNREGISTER:
2151 team_del_slave(port->team->dev, dev);
2153 case NETDEV_FEAT_CHANGE:
2154 team_compute_features(port->team);
2156 case NETDEV_CHANGEMTU:
2157 /* Forbid changing the mtu of an underlying device */
2159 case NETDEV_PRE_TYPE_CHANGE:
2160 /* Forbid changing the type of an underlying device */
2166 static struct notifier_block team_notifier_block __read_mostly = {
2167 .notifier_call = team_device_event,
2171 /***********************
2172 * Module init and exit
2173 ***********************/
2175 static int __init team_module_init(void)
2179 register_netdevice_notifier(&team_notifier_block);
2181 err = rtnl_link_register(&team_link_ops);
2185 err = team_nl_init();
2192 rtnl_link_unregister(&team_link_ops);
2195 unregister_netdevice_notifier(&team_notifier_block);
2200 static void __exit team_module_exit(void)
2203 rtnl_link_unregister(&team_link_ops);
2204 unregister_netdevice_notifier(&team_notifier_block);
2207 module_init(team_module_init);
2208 module_exit(team_module_exit);
2210 MODULE_LICENSE("GPL v2");
2211 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2212 MODULE_DESCRIPTION("Ethernet team device driver");
2213 MODULE_ALIAS_RTNL_LINK(DRV_NAME);