/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
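
/* Lockless qdiscs (TCQ_F_NOLOCK, e.g. pfifo_fast below) are the
 * exception: their enqueue/dequeue paths run without the root lock,
 * serialized by sch->seqlock and per-band lockless rings instead, and
 * qdisc_lock(q) is taken only to protect the gso_skb and skb_bad_txq
 * requeue lists, as the helpers below do.
 */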

static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_atomic_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_atomic_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_atomic_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}
	if (lock)
		spin_unlock(lock);
	__netif_schedule(q);
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}
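
/* The byte budget above comes from qdisc_avail_bulklimit(), i.e. the
 * remaining BQL (byte queue limit) headroom of the txq, so bulk
 * dequeue never pulls more than the driver queue can absorb; note
 * that a single large GSO skb can consume the whole budget at once.
 */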

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_atomic_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the "running" seqcount guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false - hardware queue frozen, back off
 *				true  - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}
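
/* Typical caller is qdisc_restart() below: for locked qdiscs it passes
 * qdisc_lock(q) as root_lock, which this function drops around the
 * actual driver transmit; TCQ_F_NOLOCK qdiscs pass root_lock == NULL.
 */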

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The "running" seqcount guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: postpone processing if
		 * 1. we've exceeded the packet quota;
		 * 2. another process needs the CPU.
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}
}

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);
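
/* Note: stacked virtual devices (vlan, macvlan) are resolved to the
 * underlying real device first, since it is the lower device's TX
 * queues that carry the trans_start timestamps the watchdog below
 * cares about.
 */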

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3
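
/* Band selection example: skb->priority & TC_PRIO_MAX indexes
 * prio2band, so TC_PRIO_BESTEFFORT (0) maps to band 1,
 * TC_PRIO_BULK (2) to band 2, and TC_PRIO_INTERACTIVE (6) to band 0;
 * lower-numbered bands are dequeued first.
 */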

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else {
		qdisc->empty = true;
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	for_each_possible_cpu(i) {
		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);

		q->backlog = 0;
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =	pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
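
/* pfifo_fast runs without the qdisc root lock: TCQ_F_NOLOCK makes each
 * band an MPMC ptr_ring (skb_array), and TCQ_F_CPUSTATS tells
 * qdisc_alloc() below to allocate per-CPU byte/packet and queue stats.
 */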

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask more and do alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->empty = true;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0)
		return sch;

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 * Intended to be used as optimization, this function only takes rtnl lock if
 * qdisc reference counter reached zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Graft new qdisc in place of the old one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices which need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		bool nolock = qdisc->flags & TCQ_F_NOLOCK;

		if (nolock)
			spin_lock_bh(&qdisc->seqlock);
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
		if (nolock)
			spin_unlock_bh(&qdisc->seqlock);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

static void dev_qdisc_reset(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *none)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (qdisc)
		qdisc_reset(qdisc);
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev))
			yield();
		/* The new qdisc is assigned at this point so we can safely
		 * unwind stale skb lists and qdisc statistics
		 */
		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
		if (dev_ingress_queue(dev))
			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_put(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be :
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead :
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
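
/* Worked example (illustrative): at 1 Mbit/s, rate_bytes_ps == 125000,
 * so one byte costs NSEC_PER_SEC / 125000 = 8000 ns. The loop above
 * stops with mult = 8000 << 19 and shift = 19, hence
 * (len * mult) >> shift == len * 8000 exactly: a 1500-byte frame
 * costs 12,000,000 ns (12 ms) of link time.
 */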

static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for any in-flight RCU callback before miniq is freed. */
		rcu_barrier();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until the previous call_rcu
	 * callback is done.
	 */
	rcu_barrier();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is the counterpart of the rcu barriers above. We need
		 * to block potential new users of miniq_old until all
		 * readers are done with it.
		 */
		call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);
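
/* The pair flip-flops between miniq1 and miniq2: a new filter list is
 * published into the currently inactive buffer with rcu_assign_pointer()
 * while readers may still hold the old one, and the rcu_barrier()/
 * call_rcu() dance in mini_qdisc_pair_swap() keeps a buffer from being
 * reused before its last reader is done.
 */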