/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>
#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
/*
 * Since the ability to change mac address for open port device is tested in
 * team_port_add, this function can be called without control of return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);
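/*
 * Illustrative sketch (not part of this driver): a mode that wants every
 * port to carry the team device's MAC could call the exported helper from
 * its port_enter op, roughly like this (the "mymode" name is hypothetical):
 *
 *	static int mymode_port_enter(struct team *team, struct team_port *port)
 *	{
 *		return team_port_set_team_mac(port);
 *	}
 */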
static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}


/*******************
 * Options handling
 *******************/
struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};
static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}
static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	struct team_port *port;
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
		return 0;
	}
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}
static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}
static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	int err;
	struct team_option **dst_opts;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}
static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}
static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
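/*
 * Usage sketch, assuming the option API above (the "mymode" names are
 * hypothetical, not part of this file): a mode would typically register its
 * private options from its init op and drop them again in its exit op:
 *
 *	static const struct team_option mymode_options[] = {
 *		{
 *			.name = "sticky",
 *			.type = TEAM_OPTION_TYPE_BOOL,
 *			.per_port = true,
 *			.getter = mymode_sticky_get,
 *			.setter = mymode_sticky_set,
 *		},
 *	};
 *
 *	static int mymode_init(struct team *team)
 *	{
 *		return team_options_register(team, mymode_options,
 *					     ARRAY_SIZE(mymode_options));
 *	}
 *
 *	static void mymode_exit(struct team *team)
 *	{
 *		team_options_unregister(team, mymode_options,
 *					ARRAY_SIZE(mymode_options));
 *	}
 */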
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}
static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);
void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
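/*
 * Registration sketch, assuming the mode API above: a mode lives in its own
 * module and registers a struct team_mode from module init. The names below
 * are hypothetical; see the dummy ops further down for the expected
 * transmit/receive signatures.
 *
 *	static const struct team_mode mymode_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.ops		= &mymode_ops,
 *	};
 *
 *	static int __init mymode_module_init(void)
 *	{
 *		return team_mode_register(&mymode_mode);
 *	}
 *
 *	static void __exit mymode_module_exit(void)
 *	{
 *		team_mode_unregister(&mymode_mode);
 *	}
 *
 * The module alias has to match the request_module("team-mode-%s", kind)
 * call in team_mode_get() below, e.g. MODULE_ALIAS("team-mode-mymode").
 */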
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

rx_handler_result_t team_dummy_receive(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->mode = &__team_no_mode;
}
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team_is_mode_set(team) || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}
/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}
static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
/************************
 * Rx path frame handler
 ************************/

static bool team_port_enabled(struct team_port *port);

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
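/*
 * For reference, a mode's receive op follows the rx_handler result contract
 * used above: RX_HANDLER_ANOTHER hands the skb to the team device,
 * RX_HANDLER_EXACT keeps delivery on the port itself. A minimal pass-through
 * (hypothetical, mirroring team_dummy_receive above) would be:
 *
 *	static rx_handler_result_t mymode_receive(struct team *team,
 *						  struct team_port *port,
 *						  struct sk_buff *skb)
 *	{
 *		return RX_HANDLER_ANOTHER;
 *	}
 */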
/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

static bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}
/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}
static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	int rm_index = port->index;

	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, rm_index);
	team->en_port_count--;
	port->index = -1;
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}
static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}
static void __team_port_change_check(struct team_port *port, bool linkup);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	port->index = -1;
	team_port_enable(team, port);
	list_add_tail_rcu(&port->list, &team->port_list);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	port->removed = true;
	__team_port_change_check(port, false);
	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_adjust_ops(team);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}
static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	return 0;
}
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
};
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);
	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}
static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not
	 * possible to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}
static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not
	 * possible to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}
static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	return err;
}
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
};
/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct team),
	.setup		= team_setup,
	.newlink	= team_newlink,
	.validate	= team_validate,
};
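/*
 * With this rtnl_link_ops registered, a team device is created and populated
 * from userspace via rtnetlink, e.g.:
 *
 *	ip link add name team0 type team
 *	ip link set eth0 down
 *	ip link set eth0 master team0
 */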
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};
static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (IS_ERR(hdr)) {
		err = PTR_ERR(hdr);
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}
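/*
 * Userspace-side sketch (not part of this file), assuming libnl-genl: the
 * no-op command above can be exercised by resolving TEAM_GENL_NAME and
 * sending TEAM_CMD_NOOP; error handling is omitted for brevity:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, TEAM_GENL_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    TEAM_CMD_NOOP, TEAM_GENL_VERSION);
 *	nl_send_auto(sk, msg);
 */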
/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 pid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, pid);
}
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 pid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, pid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(NLMSG_DEFAULT_SIZE - GENL_HDRLEN, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}
static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, pid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, pid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, pid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}
static int team_nl_send_event_options_get(struct team *team,
					   struct list_head *sel_opt_inst_list);

static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;
	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);

	return err;
}
static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      u32 pid, u32 seq, int flags,
				      struct team *team, bool fillall)
{
	void *hdr;
	struct nlattr *port_list;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		/* Include only changed ports if fill all mode is not on */
		if (!fillall && !port->changed)
			continue;
		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
			goto nla_put_failure;
		if (port->changed) {
			if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
				goto nla_put_failure;
			port->changed = false;
		}
		if ((port->removed &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
		    (port->state.linkup &&
		     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
		    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
		    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
			goto nla_put_failure;
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}
static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
					  struct genl_info *info, int flags,
					  struct team *team)
{
	return team_nl_fill_port_list_get(skb, info->snd_pid,
					  info->snd_seq, NLM_F_ACK,
					  team, true);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);

	team_nl_team_put(team);

	return err;
}
static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 pid)
{
	return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
				       team_change_event_mcgrp.id, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}
static int team_nl_send_event_port_list_get(struct team *team)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}
static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (!port->removed && port->state.linkup == linkup)
		return;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port->team);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
	case NETDEV_DOWN:
		team_port_change_check(port, false);
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid to change mtu of underlaying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid to change type of underlaying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}
static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}
static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}
module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);