// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

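/* Clear STATE_MISSED, then re-set it if the tx queue has been restarted in
 * the meantime (so the missed dequeue is retried), or set STATE_DRAINING
 * while the queue remains frozen/stopped.
 */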
static void qdisc_maybe_clear_missed(struct Qdisc *q,
				     const struct netdev_queue *txq)
{
	clear_bit(__QDISC_STATE_MISSED, &q->state);

	/* Make sure the below netif_xmit_frozen_or_stopped()
	 * checking happens after clearing STATE_MISSED.
	 */
	smp_mb__after_atomic();

	/* Check netif_xmit_frozen_or_stopped() again to make sure
	 * STATE_MISSED ends up set if the STATE_MISSED set by
	 * netif_tx_wake_queue()'s rescheduling of net_tx_action()
	 * was cleared by the clear_bit() above.
	 */
	if (!netif_xmit_frozen_or_stopped(txq))
		set_bit(__QDISC_STATE_MISSED, &q->state);
	else
		set_bit(__QDISC_STATE_DRAINING, &q->state);
}

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

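/* Pull the head of skb_bad_txq once its tx queue is running again; returns
 * NULL if the list is empty, or SKB_XOFF_MAGIC if the queue is still
 * frozen/stopped.
 */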
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
			qdisc_maybe_clear_missed(q, txq);
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

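/* Stash an skb whose tx queue turned out to be frozen/stopped; it remains
 * accounted as queued until __skb_dequeue_bad_txq() retrieves it.
 */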
static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

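/* Put a chain of skbs back onto gso_skb, re-accounting them as queued,
 * and kick the qdisc so the transmission is retried.
 */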
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}

	if (lock) {
		spin_unlock(lock);
		set_bit(__QDISC_STATE_MISSED, &q->state);
	} else {
		__netif_schedule(q);
	}
}

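/* Chain additional packets from the qdisc onto @skb while the BQL byte
 * budget of @txq allows; a GSO packet counts its full length but only
 * one packet.
 */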
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skbs in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
			qdisc_maybe_clear_missed(q, txq);
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq)) {
		qdisc_maybe_clear_missed(q, txq);
		return skb;
	}

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. The owning qdisc running bit guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *	false - hardware queue frozen, back off
 *	true  - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		else
			qdisc_maybe_clear_missed(q, txq);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The owning qdisc running bit guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	false - queue is empty or throttled.
 *	true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

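/* Dequeue and transmit packets until the qdisc is empty, the driver pushes
 * back, or the tx quota is spent; in the quota case the remaining work is
 * rescheduled via STATE_MISSED or __netif_schedule().
 */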
void __qdisc_run(struct Qdisc *q)
{
	int quota = READ_ONCE(net_hotdata.dev_tx_weight);
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			if (q->flags & TCQ_F_NOLOCK)
				set_bit(__QDISC_STATE_MISSED, &q->state);
			else
				__netif_schedule(q);

			break;
		}
	}
}

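/* Return the most recent trans_start timestamp across all tx queues. */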
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
	unsigned long val;
	unsigned int i;

	for (i = 1; i < dev->num_tx_queues; i++) {
		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void netif_freeze_queues(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->tx_global_lock);
	netif_freeze_queues(dev);
}
EXPORT_SYMBOL(netif_tx_lock);

static void netif_unfreeze_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here. If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
}

void netif_tx_unlock(struct net_device *dev)
{
	netif_unfreeze_queues(dev);
	spin_unlock(&dev->tx_global_lock);
}
EXPORT_SYMBOL(netif_tx_unlock);

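/* Per-device watchdog timer: if any stopped tx queue last made progress
 * more than watchdog_timeo ago, freeze the queues and invoke the driver's
 * ndo_tx_timeout() handler.
 */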
static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = timer_container_of(dev, t, watchdog_timer);
	bool release = true;

	spin_lock(&dev->tx_global_lock);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			unsigned int timedout_ms = 0;
			unsigned int i;
			unsigned long trans_start;
			unsigned long oldest_start = jiffies;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (!netif_xmit_stopped(txq))
					continue;

				/* Paired with WRITE_ONCE() + smp_mb...() in
				 * netdev_tx_sent_queue() and netif_tx_stop_queue().
				 */
				smp_mb();
				trans_start = READ_ONCE(txq->trans_start);

				if (time_after(jiffies, trans_start + dev->watchdog_timeo)) {
					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
					atomic_long_inc(&txq->trans_timeout);
					break;
				}
				if (time_after(oldest_start, trans_start))
					oldest_start = trans_start;
			}

			if (unlikely(timedout_ms)) {
				trace_net_dev_xmit_timeout(dev, i);
				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
					    raw_smp_processor_id(),
					    i, timedout_ms);
				netif_freeze_queues(dev);
				dev->netdev_ops->ndo_tx_timeout(dev, i);
				netif_unfreeze_queues(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(oldest_start +
						     dev->watchdog_timeo)))
				release = false;
		}
	}
	spin_unlock(&dev->tx_global_lock);

	if (release)
		netdev_put(dev, &dev->watchdog_dev_tracker);
}

void netdev_watchdog_up(struct net_device *dev)
{
	if (!dev->netdev_ops->ndo_tx_timeout)
		return;
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = 5 * HZ;
	if (!mod_timer(&dev->watchdog_timer,
		       round_jiffies(jiffies + dev->watchdog_timeo)))
		netdev_hold(dev, &dev->watchdog_dev_tracker,
			    GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(netdev_watchdog_up);

static void netdev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (timer_delete(&dev->watchdog_timer))
		netdev_put(dev, &dev->watchdog_dev_tracker);
	netif_tx_unlock_bh(dev);
}

/**
 * netif_carrier_on - set carrier
 * @dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 * netif_carrier_off - clear carrier
 * @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 * netif_carrier_event - report carrier state event
 * @dev: network device
 *
 * Device has detected a carrier event but the carrier state wasn't changed.
 * Use in drivers when querying carrier state asynchronously, to avoid missing
 * events (link flaps) if link recovers before it's queried.
 */
void netif_carrier_event(struct net_device *dev)
{
	if (dev->reg_state == NETREG_UNINITIALIZED)
		return;
	atomic_inc(&dev->carrier_up_count);
	atomic_inc(&dev->carrier_down_count);
	linkwatch_fire_event(dev);
}
EXPORT_SYMBOL_GPL(netif_carrier_event);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 * under all circumstances. It is difficult to invent anything faster or
 * cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	dev_core_stats_tx_dropped_inc(skb->dev);
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id = "noop",
	.priv_size = 0,
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.peek = noop_dequeue,
	.owner = THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
};

struct Qdisc noop_qdisc = {
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.flags = TCQ_F_BUILTIN,
	.ops = &noop_qdisc_ops,
	.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue = &noop_netdev_queue,
	.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
	.owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here.
	 */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id = "noqueue",
	.priv_size = 0,
	.init = noqueue_init,
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.peek = noop_dequeue,
	.owner = THIS_MODULE,
};

const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
EXPORT_SYMBOL(sch_default_prio2band);

/* 3-band FIFO queue: old style, but should be a bit faster than
 * the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 * - rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	bool need_retry = true;
	int band;

retry:
	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else if (need_retry &&
		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
		/* Delay clearing the STATE_MISSED here to reduce
		 * the overhead of the second spin_trylock() in
		 * qdisc_run_begin() and of the __netif_schedule()
		 * call in qdisc_run_end().
		 */
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);

		/* Make sure dequeuing happens after clearing
		 * STATE_MISSED.
		 */
		smp_mb__after_atomic();

		need_retry = false;

		goto retry;
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can bypass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
					    GFP_KERNEL);
}

7007ba63
CW
912}
913
6ec1c69a 914struct Qdisc_ops pfifo_fast_ops __read_mostly = {
d3678b46 915 .id = "pfifo_fast",
fd3ae5e8 916 .priv_size = sizeof(struct pfifo_fast_priv),
d3678b46
DM
917 .enqueue = pfifo_fast_enqueue,
918 .dequeue = pfifo_fast_dequeue,
99c0db26 919 .peek = pfifo_fast_peek,
d3678b46 920 .init = pfifo_fast_init,
c5ad119f 921 .destroy = pfifo_fast_destroy,
d3678b46
DM
922 .reset = pfifo_fast_reset,
923 .dump = pfifo_fast_dump,
7007ba63 924 .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
1da177e4 925 .owner = THIS_MODULE,
c5ad119f 926 .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
1da177e4 927};
1f27cde3 928EXPORT_SYMBOL(pfifo_fast_ops);
1da177e4 929
1a33e10e 930static struct lock_class_key qdisc_tx_busylock;
1a33e10e 931
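/* Allocate and minimally initialise a qdisc for @dev_queue; the caller is
 * expected to run ops->init() (see qdisc_create_dflt()) before use.
 */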
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;
	unsigned int size = sizeof(*sch) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));

	if (!sch)
		goto errout;
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	gnet_stats_basic_sync_init(&sch->bstats);
	lockdep_register_key(&sch->root_lock_key);
	spin_lock_init(&sch->q.lock);
	lockdep_set_class(&sch->q.lock, &sch->root_lock_key);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope as busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->owner = -1;
	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	lockdep_unregister_key(&sch->root_lock_key);
	kfree(sch);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!bpf_try_module_get(ops, ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		bpf_module_put(ops, ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
		trace_qdisc_create(ops, dev_queue->dev, parentid);
		return sch;
	}

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	trace_qdisc_reset(qdisc);

	if (ops->reset)
		ops->reset(qdisc);

	__skb_queue_purge(&qdisc->gso_skb);
	__skb_queue_purge(&qdisc->skb_bad_txq);

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree(qdisc);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

static void __qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct net_device *dev = qdisc_dev(qdisc);

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);

	qdisc_reset(qdisc);

	if (ops->destroy)
		ops->destroy(qdisc);

	lockdep_unregister_key(&qdisc->root_lock_key);
	bpf_module_put(ops, ops->owner);
	netdev_put(dev, &qdisc->dev_tracker);

	trace_qdisc_destroy(qdisc);

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	__qdisc_destroy(qdisc);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (!qdisc)
		return;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	__qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
 * Intended as an optimization: this function only takes the rtnl lock if
 * the qdisc reference counter reaches zero.
 */
void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	__qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);

		qdisc_put(qdisc);
	}
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;
	else if (dev->type == ARPHRD_CAN)
		ops = &pfifo_fast_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc)
		return;

	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		qdisc = rtnl_dereference(txq->qdisc_sleeping);
		rcu_assign_pointer(dev->qdisc, qdisc);
		qdisc_refcount_inc(qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			rcu_assign_pointer(dev->qdisc, qdisc);
			qdisc->ops->attach(qdisc);
		}
	}
	qdisc = rtnl_dereference(dev->qdisc);

	/* Detect default qdisc setup/init failure and fall back to "noqueue" */
	if (qdisc == &noop_qdisc) {
		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
		dev->priv_flags |= IFF_NO_QUEUE;
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		qdisc = rtnl_dereference(txq->qdisc_sleeping);
		rcu_assign_pointer(dev->qdisc, qdisc);
		qdisc_refcount_inc(qdisc);
		dev->priv_flags ^= IFF_NO_QUEUE;
	}

#ifdef CONFIG_NET_SCHED
	if (qdisc != &noop_qdisc)
		qdisc_hash_add(qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		WRITE_ONCE(dev_queue->trans_start, 0);
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to the device;
	 * create a default one for devices which need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_sync_needed)
{
	bool *sync_needed = _sync_needed;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		if (qdisc->enqueue)
			*sync_needed = true;
		qdisc_deactivate(qdisc);
		rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
	}
}

static void dev_reset_queue(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *_unused)
{
	struct Qdisc *qdisc;
	bool nolock;

	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	if (!qdisc)
		return;

	nolock = qdisc->flags & TCQ_F_NOLOCK;

	if (nolock)
		spin_lock_bh(&qdisc->seqlock);
	spin_lock_bh(qdisc_lock(qdisc));

	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (nolock) {
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
		spin_unlock_bh(&qdisc->seqlock);
	}
}

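/* True if any tx qdisc of @dev is still running or is scheduled for
 * net_tx_action().
 */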
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = rtnl_dereference(dev_queue->qdisc_sleeping);

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	bool sync_needed = false;
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &sync_needed);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &sync_needed);

		netdev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc enqueuing calls. */
	if (sync_needed)
		synchronize_net();

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);

		if (dev_ingress_queue(dev))
			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
	}

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev)) {
			/* wait_event() would avoid this sleep-loop but would
			 * require expensive checks in the fast paths of packet
			 * processing which isn't worth it.
			 */
			schedule_timeout_uninterruptible(1);
		}
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx)
{
	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);

	if (qdisc->ops->change_real_num_tx)
		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
}

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		/* Only update the default qdiscs we created,
		 * qdiscs with handles are always hashed.
		 */
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_add(qdisc, false);
	}
#endif
}
EXPORT_SYMBOL(mq_change_real_num_tx);

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}

void dev_init_scheduler(struct net_device *dev)
{
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(rtnl_dereference(dev->qdisc));
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

/**
 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
 * @rate: Rate to compute reciprocal division values of
 * @mult: Multiplier for reciprocal division
 * @shift: Shift for reciprocal division
 *
 * The multiplier and shift for reciprocal division by rate are stored
 * in mult and shift.
 *
 * The deal here is to replace a divide by a reciprocal one
 * in the fast path (a reciprocal divide is a multiply and a shift).
 *
 * The normal formula would be:
 * time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 *
 * We compute mult/shift to use instead:
 * time_in_ns = (len * mult) >> shift;
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here because it doesn't handle
 * 64-bit values.
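 *
 * For example, with rate = 125,000,000 bytes/s (1 Gbit/s) the loop settles
 * on mult = 2^31 and shift = 28, so time_in_ns = (len * mult) >> shift
 * = len * 8: a 1500 byte packet is charged 12000 ns of wire time.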
 */
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{
	u64 factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;

	if (rate <= 0)
		return;

	for (;;) {
		*mult = div64_u64(factor, rate);
		if (*mult & (1U << 31) || factor & (1ULL << 63))
			break;
		factor <<= 1;
		(*shift)++;
	}
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->mpu = conf->mpu;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{
	r->rate_pkts_ps = pktrate64;
	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ppscfg_precompute);

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
	} else {
		miniq = miniq_old != &miniqp->miniq1 ?
			&miniqp->miniq1 : &miniqp->miniq2;

		/* We need to make sure that readers won't see the miniq
		 * we are about to modify. So ensure that at least one RCU
		 * grace period has elapsed since the miniq was made
		 * inactive.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			cond_synchronize_rcu(miniq->rcu_state);
		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
			synchronize_rcu_expedited();

		miniq->filter_list = tp_head;
		rcu_assign_pointer(*miniqp->p_miniq, miniq);
	}

	if (miniq_old)
		/* This is the counterpart of the RCU sync above. We need to
		 * block any potential new user of miniq_old until no reader
		 * can still see it.
		 */
		miniq_old->rcu_state = start_poll_synchronize_rcu();
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{
	miniqp->miniq1.block = block;
	miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);