vlan: introduce functions to do mass addition/deletion of vids by another device
[linux-2.6-block.git] drivers/net/team/team.c
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	struct team_port *port = rcu_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
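
/*
 * Both getters rely on dev->rx_handler_data having been set to the
 * struct team_port by netdev_rx_handler_register() in team_port_add();
 * the team_port_exists() check guards against reading rx_handler_data
 * that belongs to some other rx_handler.
 */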

/*
 * Since team_port_add() checks that a port device's MAC address can be
 * changed while the device is open, this function may be called without
 * checking its return value.
 */
static int __set_port_mac(struct net_device *port_dev,
			  const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, ETH_ALEN);
	addr.sa_family = ARPHRD_ETHER;
	return dev_set_mac_address(port_dev, &addr);
}

int team_port_set_orig_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->orig.dev_addr);
}

int team_port_set_team_mac(struct team_port *port)
{
	return __set_port_mac(port->dev, port->team->dev->dev_addr);
}
EXPORT_SYMBOL(team_port_set_team_mac);


/*******************
 * Options handling
 *******************/

struct team_option *__team_find_option(struct team *team, const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto rollback;
		}
	}

	for (i = 0; i < option_count; i++)
		list_add_tail(&dst_opts[i]->list, &team->option_list);

	kfree(dst_opts);
	return 0;

rollback:
	for (i = 0; i < option_count; i++)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
EXPORT_SYMBOL(team_options_register);
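
/*
 * Illustrative sketch of registering an option, modeled on the "mode"
 * option below (foo_get/foo_set/foo_options are hypothetical names):
 *
 *	static const struct team_option foo_options[] = {
 *		{
 *			.name   = "foo",
 *			.type   = TEAM_OPTION_TYPE_U32,
 *			.getter = foo_get,
 *			.setter = foo_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, foo_options,
 *				    ARRAY_SIZE(foo_options));
 *
 * Registration returns -EEXIST if an option of the same name already
 * exists and -ENOMEM on allocation failure; on any failure nothing is
 * added to team->option_list.
 */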

static void __team_options_change_check(struct team *team,
					struct team_option *changed_option);

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_unregister(team, option, option_count);
	__team_options_change_check(team, NULL);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team, struct team_option *option,
			   void *arg)
{
	return option->getter(team, arg);
}

static int team_option_set(struct team *team, struct team_option *option,
			   void *arg)
{
	int err;

	err = option->setter(team, arg);
	if (err)
		return err;

	__team_options_change_check(team, option);
	return err;
}

/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

static struct team_mode *__find_mode(const char *kind)
{
	struct team_mode *mode;

	list_for_each_entry(mode, &mode_list, list) {
		if (strcmp(mode->kind, kind) == 0)
			return mode;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(struct team_mode *mode)
{
	int err = 0;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;
	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		goto unlock;
	}
	list_add_tail(&mode->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

int team_mode_unregister(struct team_mode *mode)
{
	spin_lock(&mode_list_lock);
	list_del_init(&mode->list);
	spin_unlock(&mode_list_lock);
	return 0;
}
EXPORT_SYMBOL(team_mode_unregister);
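
/*
 * Sketch of how a mode module is expected to plug in (the "foo" mode is
 * hypothetical; only the "team-mode-<kind>" alias format is dictated by
 * the request_module() call in team_mode_get() below):
 *
 *	static const struct team_mode_ops foo_mode_ops = {
 *		.transmit = foo_transmit,
 *		.receive  = foo_receive,
 *	};
 *
 *	static struct team_mode foo_mode = {
 *		.kind  = "foo",
 *		.owner = THIS_MODULE,
 *		.ops   = &foo_mode_ops,
 *	};
 *
 * The module's init/exit would then call team_mode_register(&foo_mode)
 * and team_mode_unregister(&foo_mode), and declare
 * MODULE_ALIAS("team-mode-foo") so the mode can be autoloaded.
 */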

static struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode *mode;

	spin_lock(&mode_list_lock);
	mode = __find_mode(kind);
	if (!mode) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mode = __find_mode(kind);
	}
	if (mode)
		if (!try_module_get(mode->owner))
			mode = NULL;

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (list_empty(&team->port_list) ||
	    !team->mode || !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that no port can be present at the time of
 * a mode change. Therefore no packets are in flight and there's no need
 * to set the mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team->mode) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team->mode = NULL;
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team->mode && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;

	res = team->ops.receive(team, port, skb);
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
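
/*
 * For reference: returning RX_HANDLER_ANOTHER from a mode's receive op
 * makes the rx core do another pass with skb->dev retargeted to the team
 * device (which is why rx stats are bumped in that branch above), while
 * RX_HANDLER_CONSUMED means the handler took over (e.g. freed) the skb.
 */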


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Add/delete port to the team port list. Write guarded by rtnl_lock.
 * Takes care of correct port->index setup (might be racy).
 */
static void team_port_list_add_port(struct team *team,
				    struct team_port *port)
{
	port->index = team->port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	list_add_tail_rcu(&port->list, &team->port_list);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_list_del_port(struct team *team,
				    struct team_port *port)
{
	int rm_index = port->index;

	hlist_del_rcu(&port->hlist);
	list_del_rcu(&port->list);
	__reconstruct_port_hlist(team, rm_index);
	team->port_count--;
}

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup);

static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK ||
	    port_dev->type != ARPHRD_ETHER) {
		netdev_err(dev, "Device %s is of an unsupported type\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port of a team device\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = netdev_set_master(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set master\n", portname);
		goto err_set_master;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	team_port_list_add_port(team, port);
	team_adjust_ops(team);
	__team_compute_features(team);
	__team_port_change_check(port, !!netif_carrier_ok(port_dev));

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_handler_register:
	netdev_set_master(port_dev, NULL);

err_set_master:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_mac(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	__team_port_change_check(port, false);
	team_port_list_del_port(team, port);
	team_adjust_ops(team);
	netdev_rx_handler_unregister(port_dev);
	netdev_set_master(port_dev, NULL);
	dev_close(port_dev);
	team_port_leave(team, port);
	team_port_set_orig_mac(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	synchronize_rcu();
	kfree(port);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static const char team_no_mode_kind[] = "*NOMODE*";

static int team_mode_option_get(struct team *team, void *arg)
{
	const char **str = arg;

	*str = team->mode ? team->mode->kind : team_no_mode_kind;
	return 0;
}

static int team_mode_option_set(struct team *team, void *arg)
{
	const char **str = arg;

	return team_change_mode(team, *str);
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
};

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	return 0;

err_options_register:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	return 0;
}

static int team_close(struct net_device *dev)
{
	netif_carrier_off(dev);
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync(port->dev, dev);
		dev_mc_sync(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		if (team->ops.port_change_mac)
			team->ops.port_change_mac(team, port);
	rcu_read_unlock();
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu\n",
				   port->dev->name);
			goto unwind;
		}
	}
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}
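
/*
 * The fetch_begin/fetch_retry loop above is the usual u64_stats seqcount
 * pattern: the per-cpu counters are re-read if a writer updated them
 * concurrently, keeping the 64-bit values consistent on 32-bit hosts.
 */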

static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, vid);
	rcu_read_unlock();

	return 0;
}
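
/*
 * vlan_vid_add()/vlan_vid_del() maintain the VID filter of each
 * underlying port device on behalf of the team device, so hardware VLAN
 * filtering keeps working for VLANs configured on top of the team. Per
 * this patch's subject, the vlan core also gains helpers for mass
 * addition/deletion of such VIDs by another device, e.g. when a port
 * joins or leaves a team that already carries VLANs.
 */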

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init = team_init,
	.ndo_uninit = team_uninit,
	.ndo_open = team_open,
	.ndo_stop = team_close,
	.ndo_start_xmit = team_xmit,
	.ndo_change_rx_flags = team_change_rx_flags,
	.ndo_set_rx_mode = team_set_rx_mode,
	.ndo_set_mac_address = team_set_mac_address,
	.ndo_change_mtu = team_change_mtu,
	.ndo_get_stats64 = team_get_stats64,
	.ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
	.ndo_add_slave = team_add_slave,
	.ndo_del_slave = team_del_slave,
	.ndo_fix_features = team_fix_features,
};


/***********************
 * rt netlink interface
 ***********************/

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->destructor = team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Leave this up to the underlying drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		random_ether_addr(dev->dev_addr);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct team),
	.setup = team_setup,
	.newlink = team_newlink,
	.validate = team_validate,
};


/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id = GENL_ID_GENERATE,
	.name = TEAM_GENL_NAME,
	.version = TEAM_GENL_VERSION,
	.maxattr = TEAM_ATTR_MAX,
	.netnsok = true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA] = {
		.type = NLA_BINARY,
		.len = TEAM_STRING_MAX_LEN,
	},
};
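
/*
 * For orientation, team_nl_cmd_options_set() below expects a request
 * laid out as:
 *
 *	TEAM_ATTR_TEAM_IFINDEX (u32)
 *	TEAM_ATTR_LIST_OPTION (nested)
 *		TEAM_ATTR_ITEM_OPTION (nested, repeated)
 *			TEAM_ATTR_OPTION_NAME (string)
 *			TEAM_ATTR_OPTION_TYPE (u8: NLA_U32 or NLA_STRING)
 *			TEAM_ATTR_OPTION_DATA (u32 or string payload)
 */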

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (IS_ERR(hdr)) {
		err = PTR_ERR(hdr);
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by the following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

static int team_nl_send_generic(struct genl_info *info, struct team *team,
				int (*fill_func)(struct sk_buff *skb,
						 struct genl_info *info,
						 int flags, struct team *team))
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = fill_func(skb, info, NLM_F_ACK, team);
	if (err < 0)
		goto err_fill;

	err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_fill_options_get_changed(struct sk_buff *skb,
					    u32 pid, u32 seq, int flags,
					    struct team *team,
					    struct team_option *changed_option)
{
	struct nlattr *option_list;
	void *hdr;
	struct team_option *option;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_OPTIONS_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		return -EMSGSIZE;

	list_for_each_entry(option, &team->option_list, list) {
		struct nlattr *option_item;
		long arg;

		option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
		if (!option_item)
			goto nla_put_failure;
		NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
		if (option == changed_option)
			NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
		switch (option->type) {
		case TEAM_OPTION_TYPE_U32:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
			team_option_get(team, option, &arg);
			NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
			break;
		case TEAM_OPTION_TYPE_STRING:
			NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
			team_option_get(team, option, &arg);
			NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
				       (char *) arg);
			break;
		default:
			BUG();
		}
		nla_nest_end(skb, option_item);
	}

	nla_nest_end(skb, option_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_options_get(struct sk_buff *skb,
				    struct genl_info *info, int flags,
				    struct team *team)
{
	return team_nl_fill_options_get_changed(skb, info->snd_pid,
						info->snd_seq, NLM_F_ACK,
						team, NULL);
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_options_get);

	team_nl_team_put(team);

	return err;
}

static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;
	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
		enum team_option_type opt_type;
		struct team_option *option;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
		    !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		default:
			goto team_put;
		}

		opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
		list_for_each_entry(option, &team->option_list, list) {
			long arg;
			struct nlattr *opt_data_attr;

			if (option->type != opt_type ||
			    strcmp(option->name, opt_name))
				continue;
			opt_found = true;
			opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				arg = nla_get_u32(opt_data_attr);
				break;
			case TEAM_OPTION_TYPE_STRING:
				arg = (long) nla_data(opt_data_attr);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, option, &arg);
			if (err)
				goto team_put;
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

team_put:
	team_nl_team_put(team);

	return err;
}

static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
					      u32 pid, u32 seq, int flags,
					      struct team *team,
					      struct team_port *changed_port)
{
	struct nlattr *port_list;
	void *hdr;
	struct team_port *port;

	hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
			  TEAM_CMD_PORT_LIST_GET);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		return -EMSGSIZE;

	list_for_each_entry(port, &team->port_list, list) {
		struct nlattr *port_item;

		port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
		if (!port_item)
			goto nla_put_failure;
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
		if (port == changed_port)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
		if (port->linkup)
			NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
		NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
		NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
		nla_nest_end(skb, port_item);
	}

	nla_nest_end(skb, port_list);
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int team_nl_fill_port_list_get(struct sk_buff *skb,
				      struct genl_info *info, int flags,
				      struct team *team)
{
	return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
						  info->snd_seq, NLM_F_ACK,
						  team, NULL);
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);

	team_nl_team_put(team);

	return err;
}

static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};

static int team_nl_send_event_options_get(struct team *team,
					  struct team_option *changed_option)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
					       changed_option);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_send_event_port_list_get(struct team_port *port)
{
	struct sk_buff *skb;
	int err;
	struct net *net = dev_net(port->team->dev);

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
						 port->team, port);
	if (err < 0)
		goto err_fill;

	err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
				      GFP_KERNEL);
	return err;

err_fill:
	nlmsg_free(skb);
	return err;
}

static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team,
					struct team_option *changed_option)
{
	int err;

	err = team_nl_send_event_options_get(team, changed_option);
	if (err)
		netdev_warn(team->dev, "Failed to send options change via netlink\n");
}

/* rtnl lock is held */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	int err;

	if (port->linkup == linkup)
		return;

	port->linkup = linkup;
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->speed = ethtool_cmd_speed(&ecmd);
			port->duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->speed = 0;
	port->duplex = 0;

send_event:
	err = team_nl_send_event_port_list_get(port);
	if (err)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
			    port->dev->name);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid to change mtu of underlying device */
		return NOTIFY_BAD;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid to change type of underlying device */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);