team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin
author Jiri Pirko <jiri@resnulli.us>
Wed, 14 Jan 2015 17:15:30 +0000 (18:15 +0100)
committer David S. Miller <davem@davemloft.net>
Wed, 14 Jan 2015 21:53:57 +0000 (16:53 -0500)
This patch fixes a race condition that can leave count_pending set to
-1, which results in an unwanted large burst of ARP messages (in the
case of "notify peers").

Consider the following scenario:

count_pending == 2
   CPU0                                           CPU1
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
                                               team_notify_peers
                                                 atomic_add (adding 1 to count_pending)
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 1)
  schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to 0)
   schedule_delayed_work
team_notify_peers_work
  atomic_dec_and_test (dec count_pending to -1)

Fix this race by using atomic_dec_if_positive - that will prevent
count_pending from ever going below 0.
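
For illustration only, and not part of the patch itself, here is a
minimal userspace model of the two primitives using C11 atomics. The
helpers dec_and_test() and dec_if_positive() are hypothetical
stand-ins for the kernel's atomic_dec_and_test() and
atomic_dec_if_positive():

#include <stdatomic.h>
#include <stdio.h>

/* Models atomic_dec_and_test(): always decrements and returns true
 * when the new value is 0, so one surplus caller drives the counter
 * to -1.
 */
static int dec_and_test(atomic_int *v)
{
        return atomic_fetch_sub(v, 1) - 1 == 0;
}

/* Models atomic_dec_if_positive(): decrements only while the value is
 * positive and returns old - 1, so a negative return value means the
 * counter was already 0 and nothing was done.
 */
static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
                ;
        return old - 1;
}

int main(void)
{
        atomic_int pending = 1;
        int val;

        dec_and_test(&pending);                 /* 1 -> 0 */
        dec_and_test(&pending);                 /* 0 -> -1, the underflow */
        printf("dec_and_test leaves %d\n", atomic_load(&pending));

        atomic_store(&pending, 1);
        dec_if_positive(&pending);              /* 1 -> 0 */
        val = dec_if_positive(&pending);        /* stays 0, val == -1 */
        printf("dec_if_positive leaves %d (val %d)\n",
               atomic_load(&pending), val);
        return 0;
}

This mirrors the patched work functions below: a negative val means
another run already consumed the last pending notification, so the
work bails out without calling the notifiers, and val == 0 means this
run was the last one, so the work is not rescheduled.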

Fixes: fc423ff00df3 ("team: add peer notification")
Fixes: 492b200efdd2 ("team: add support for sending multicast rejoins")
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24b36b089102be11ada5921f62d83b..f7ff493f1e73dfa129dbb10ed68c6436c52a4b1b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
                schedule_delayed_work(&team->notify_peers.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+       if (val)
                schedule_delayed_work(&team->notify_peers.dw,
                                      msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
        struct team *team;
+       int val;
 
        team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                return;
        }
+       val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+       if (val < 0) {
+               rtnl_unlock();
+               return;
+       }
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
        rtnl_unlock();
-       if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+       if (val)
                schedule_delayed_work(&team->mcast_rejoin.dw,
                                      msecs_to_jiffies(team->mcast_rejoin.interval));
 }
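
As a quick sanity check of the non-underflow property, a hypothetical
userspace stress test (again not part of the patch; build with e.g.
cc -std=c11 -pthread) can race several threads against the
dec_if_positive() model above; the counter must end at exactly 0,
never below:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 16;

static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
                ;
        return old - 1;
}

/* Each thread makes far more decrement attempts than there are
 * pending counts; the surplus attempts must be harmless no-ops.
 */
static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 1000; i++)
                dec_if_positive(&pending);
        return NULL;
}

int main(void)
{
        pthread_t t[8];

        for (int i = 0; i < 8; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 8; i++)
                pthread_join(t[i], NULL);

        /* With atomic_fetch_sub() in place of the compare-and-swap
         * loop this ends far below zero, which is exactly the
         * count_pending corruption the patch prevents. */
        assert(atomic_load(&pending) == 0);
        printf("pending = %d\n", atomic_load(&pending));
        return 0;
}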