Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Linux network device link state notification | |
3 | * | |
4 | * Author: | |
5 | * Stefan Rompf <sux@loplof.de> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License | |
9 | * as published by the Free Software Foundation; either version | |
10 | * 2 of the License, or (at your option) any later version. | |
11 | * | |
12 | */ | |
13 | ||
1da177e4 LT |
14 | #include <linux/module.h> |
15 | #include <linux/netdevice.h> | |
16 | #include <linux/if.h> | |
17 | #include <net/sock.h> | |
cacaddf5 | 18 | #include <net/pkt_sched.h> |
1da177e4 LT |
19 | #include <linux/rtnetlink.h> |
20 | #include <linux/jiffies.h> | |
21 | #include <linux/spinlock.h> | |
1da177e4 LT |
22 | #include <linux/slab.h> |
23 | #include <linux/workqueue.h> | |
24 | #include <linux/bitops.h> | |
25 | #include <asm/types.h> | |
26 | ||
27 | ||
/* Bit numbers used in the linkwatch_flags bitmask. */
enum lw_bits {
	LW_URGENT = 0,	/* an urgent event is pending; run the work ASAP */
};
31 | ||
static unsigned long linkwatch_flags;		/* holds LW_URGENT; atomic bitops only */
static unsigned long linkwatch_nextevent;	/* jiffies before which non-urgent runs are deferred */

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

/* Singly-linked list of devices with pending link events, chained
 * through dev->link_watch_next; protected by lweventlist_lock. */
static struct net_device *lweventlist;
static DEFINE_SPINLOCK(lweventlist_lock);
40 | ||
b00055aa SR |
41 | static unsigned char default_operstate(const struct net_device *dev) |
42 | { | |
43 | if (!netif_carrier_ok(dev)) | |
44 | return (dev->ifindex != dev->iflink ? | |
45 | IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN); | |
46 | ||
47 | if (netif_dormant(dev)) | |
48 | return IF_OPER_DORMANT; | |
49 | ||
50 | return IF_OPER_UP; | |
51 | } | |
52 | ||
53 | ||
54 | static void rfc2863_policy(struct net_device *dev) | |
55 | { | |
56 | unsigned char operstate = default_operstate(dev); | |
57 | ||
58 | if (operstate == dev->operstate) | |
59 | return; | |
60 | ||
61 | write_lock_bh(&dev_base_lock); | |
62 | ||
63 | switch(dev->link_mode) { | |
64 | case IF_LINK_MODE_DORMANT: | |
65 | if (operstate == IF_OPER_UP) | |
66 | operstate = IF_OPER_DORMANT; | |
67 | break; | |
68 | ||
69 | case IF_LINK_MODE_DEFAULT: | |
70 | default: | |
71 | break; | |
3ff50b79 | 72 | } |
b00055aa SR |
73 | |
74 | dev->operstate = operstate; | |
75 | ||
76 | write_unlock_bh(&dev_base_lock); | |
77 | } | |
78 | ||
79 | ||
294cc44b HX |
80 | static int linkwatch_urgent_event(struct net_device *dev) |
81 | { | |
b0e1e646 DM |
82 | struct netdev_queue *txq = &dev->tx_queue; |
83 | ||
294cc44b | 84 | return netif_running(dev) && netif_carrier_ok(dev) && |
b0e1e646 | 85 | txq->qdisc != txq->qdisc_sleeping; |
294cc44b HX |
86 | } |
87 | ||
/* Push @dev onto the pending-event list.  Callers must already hold a
 * reference on @dev (see linkwatch_fire_event()).  IRQ-safe: may be
 * called from any context. */
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	dev->link_watch_next = lweventlist;
	lweventlist = dev;
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}
98 | ||
/* Arrange for linkwatch_event() to run: immediately when @urgent,
 * otherwise no earlier than linkwatch_nextevent.  Safe against
 * concurrent callers; at most one extra reschedule is performed. */
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	/* An urgent run is already queued; it will pick up our event. */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		/* Lost the race to another urgent scheduler — done. */
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * This is true if we've scheduled it immediately or if we don't
	 * need an immediate execution and it's already pending.
	 */
	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
		return;

	/* Don't bother if there is nothing urgent. */
	if (!test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* It's already running which is good enough. */
	if (!cancel_delayed_work(&linkwatch_work))
		return;

	/* Otherwise we reschedule it again for immediate execution. */
	schedule_delayed_work(&linkwatch_work, 0);
}
136 | ||
137 | ||
138 | static void __linkwatch_run_queue(int urgent_only) | |
1da177e4 | 139 | { |
572a103d | 140 | struct net_device *next; |
1da177e4 | 141 | |
294cc44b HX |
142 | /* |
143 | * Limit the number of linkwatch events to one | |
144 | * per second so that a runaway driver does not | |
145 | * cause a storm of messages on the netlink | |
146 | * socket. This limit does not apply to up events | |
147 | * while the device qdisc is down. | |
148 | */ | |
149 | if (!urgent_only) | |
150 | linkwatch_nextevent = jiffies + HZ; | |
d9568ba9 HX |
151 | /* Limit wrap-around effect on delay. */ |
152 | else if (time_after(linkwatch_nextevent, jiffies + HZ)) | |
153 | linkwatch_nextevent = jiffies; | |
154 | ||
155 | clear_bit(LW_URGENT, &linkwatch_flags); | |
294cc44b | 156 | |
1da177e4 | 157 | spin_lock_irq(&lweventlist_lock); |
572a103d HX |
158 | next = lweventlist; |
159 | lweventlist = NULL; | |
1da177e4 LT |
160 | spin_unlock_irq(&lweventlist_lock); |
161 | ||
572a103d HX |
162 | while (next) { |
163 | struct net_device *dev = next; | |
1da177e4 | 164 | |
572a103d HX |
165 | next = dev->link_watch_next; |
166 | ||
294cc44b HX |
167 | if (urgent_only && !linkwatch_urgent_event(dev)) { |
168 | linkwatch_add_event(dev); | |
169 | continue; | |
170 | } | |
171 | ||
572a103d HX |
172 | /* |
173 | * Make sure the above read is complete since it can be | |
174 | * rewritten as soon as we clear the bit below. | |
175 | */ | |
176 | smp_mb__before_clear_bit(); | |
1da177e4 LT |
177 | |
178 | /* We are about to handle this device, | |
179 | * so new events can be accepted | |
180 | */ | |
181 | clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); | |
182 | ||
b00055aa | 183 | rfc2863_policy(dev); |
1da177e4 | 184 | if (dev->flags & IFF_UP) { |
cacaddf5 | 185 | if (netif_carrier_ok(dev)) { |
b0e1e646 DM |
186 | struct netdev_queue *txq = &dev->tx_queue; |
187 | ||
188 | WARN_ON(txq->qdisc_sleeping == &noop_qdisc); | |
cacaddf5 TC |
189 | dev_activate(dev); |
190 | } else | |
191 | dev_deactivate(dev); | |
192 | ||
1da177e4 LT |
193 | netdev_state_change(dev); |
194 | } | |
195 | ||
196 | dev_put(dev); | |
197 | } | |
294cc44b HX |
198 | |
199 | if (lweventlist) | |
d9568ba9 | 200 | linkwatch_schedule_work(0); |
4ec93edb | 201 | } |
1da177e4 LT |
202 | |
203 | ||
294cc44b HX |
204 | /* Must be called with the rtnl semaphore held */ |
205 | void linkwatch_run_queue(void) | |
1da177e4 | 206 | { |
294cc44b HX |
207 | __linkwatch_run_queue(0); |
208 | } | |
209 | ||
1da177e4 | 210 | |
294cc44b HX |
211 | static void linkwatch_event(struct work_struct *dummy) |
212 | { | |
6756ae4b | 213 | rtnl_lock(); |
294cc44b | 214 | __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies)); |
6756ae4b | 215 | rtnl_unlock(); |
1da177e4 LT |
216 | } |
217 | ||
218 | ||
219 | void linkwatch_fire_event(struct net_device *dev) | |
220 | { | |
d9568ba9 | 221 | int urgent = linkwatch_urgent_event(dev); |
1da177e4 | 222 | |
d9568ba9 | 223 | if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { |
1da177e4 | 224 | dev_hold(dev); |
1da177e4 | 225 | |
294cc44b | 226 | linkwatch_add_event(dev); |
d9568ba9 HX |
227 | } else if (!urgent) |
228 | return; | |
1da177e4 | 229 | |
d9568ba9 | 230 | linkwatch_schedule_work(urgent); |
1da177e4 LT |
231 | } |
232 | ||
233 | EXPORT_SYMBOL(linkwatch_fire_event); |