/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_MISSED,
	__QDISC_STATE_DRAINING,
};

enum qdisc_state2_t {
	/* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
	 * Use qdisc_run_begin/end() or qdisc_is_running() instead.
	 */
	__QDISC_STATE2_RUNNING,
};

#define QDISC_STATE_MISSED	BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING	BIT(__QDISC_STATE_DRAINING)

#define QDISC_STATE_NON_EMPTY	(QDISC_STATE_MISSED | \
				 QDISC_STATE_DRAINING)

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or non
				      * multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node	hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			pad;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	unsigned long		state2; /* must be written under qdisc spinlock */
	struct Qdisc		*next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	struct rcu_head		rcu;
	netdevice_tracker	dev_tracker;
	/* private data */
	long			privdata[] ____cacheline_aligned;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
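
/* Editor's illustrative sketch (not part of this header): an unlocked
 * reader can pin a qdisc that may be released concurrently by taking a
 * temporary reference under RCU; "my_use_qdisc" is a hypothetical
 * consumer, and the reference is dropped with qdisc_put_unlocked()
 * since RTNL is not held.
 *
 *	rcu_read_lock();
 *	q = rcu_dereference(txq->qdisc);
 *	if (q && qdisc_refcount_inc_nz(q)) {
 *		rcu_read_unlock();
 *		my_use_qdisc(q);
 *		qdisc_put_unlocked(q);
 *	} else {
 *		rcu_read_unlock();
 *	}
 */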

/* For !TCQ_F_NOLOCK qdisc: callers must either call this within a qdisc
 * root_lock section, or provide their own memory barriers -- ordering
 * against qdisc_run_begin/end() atomic bit operations.
 */
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
{
	return !(READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return nolock_qdisc_is_empty(qdisc);
	return !READ_ONCE(qdisc->q.qlen);
}

/* For !TCQ_F_NOLOCK qdisc, qdisc_run_begin/end() must be invoked with
 * the qdisc root lock acquired.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			return true;

		/* No need to insist if the MISSED flag was already set.
		 * Note that test_and_set_bit() also gives us memory ordering
		 * guarantees wrt potential earlier enqueue() and below
		 * spin_trylock(), both of which are necessary to prevent races
		 */
		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Try to take the lock again to make sure that we will either
		 * grab it or the CPU that still has it will see MISSED set
		 * when testing it in qdisc_run_end()
		 */
		return spin_trylock(&qdisc->seqlock);
	}
	return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		__clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
	}
}
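
/* Editor's illustrative sketch (not kernel code): the transmit path pairs
 * qdisc_run_begin()/qdisc_run_end() so only one CPU dequeues at a time;
 * for !TCQ_F_NOLOCK qdiscs the caller must already hold the qdisc root
 * lock. "my_dequeue_and_xmit" is a hypothetical helper.
 *
 *	if (qdisc_run_begin(q)) {
 *		while (my_dequeue_and_xmit(q))
 *			;
 *		qdisc_run_end(q);
 *	}
 */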

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
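
/* Editor's illustrative sketch: a dequeue path may pull several packets
 * toward the driver in one go only when the qdisc owns a single TX queue
 * and BQL still has budget; "my_bulk_dequeue" is hypothetical.
 *
 *	if (qdisc_may_bulk(q) && qdisc_avail_bulklimit(txq) > 0)
 *		skb = my_bulk_dequeue(q, txq);
 */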

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long,
					  struct netlink_ext_ack *);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
	void			(*change_real_num_tx)(struct Qdisc *sch,
						      unsigned int new_real_tx);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
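
/* Editor's illustrative sketch: a minimal qdisc module fills in struct
 * Qdisc_ops and registers it with register_qdisc() (net/sched/sch_api.c);
 * the "my_*" callbacks and struct my_sched_data are hypothetical.
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myfifo",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.init		= my_init,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	register_qdisc(&my_qdisc_ops);
 */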

struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, u32,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	bool			(*delete_empty)(struct tcf_proto *tp);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*hw_add)(struct tcf_proto *tp,
					  void *type_data);
	void			(*hw_del)(struct tcf_proto *tp,
					  void *type_data);
	void			(*bind_class)(void *, u32, unsigned long,
					      void *, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*terse_dump)(struct net *net,
					      struct tcf_proto *tp, void *fh,
					      struct sk_buff *skb,
					      struct tcmsg *t, bool rtnl_held);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
 * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
 * conditions can occur when filters are inserted/deleted simultaneously.
 */
enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock */
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
	struct hlist_node	destroy_ht_node;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	u32 classid; /* which class this block belongs to */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
	DECLARE_HASHTABLE(proto_destroy_ht, 7);
	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};

static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
	return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_lock_bh(qdisc_lock(q));
	else
		spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	if (q->flags & TCQ_F_MQROOT)
		spin_unlock_bh(qdisc_lock(q));
	else
		spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
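
/* Editor's illustrative sketch: qdisc ->change() handlers typically apply
 * new parameters under sch_tree_lock() so the dequeue path never sees a
 * half-updated configuration (RTNL is already held there); "q->limit"
 * stands for any hypothetical private field.
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	sch_tree_unlock(sch);
 */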

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}
e41a33e6 608
fd2c3ef7 609struct Qdisc_class_common {
6fe1c7a5
PM
610 u32 classid;
611 struct hlist_node hnode;
612};
613
fd2c3ef7 614struct Qdisc_class_hash {
6fe1c7a5
PM
615 struct hlist_head *hash;
616 unsigned int hashsize;
617 unsigned int hashmask;
618 unsigned int hashelems;
619};
620
621static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
622{
623 id ^= id >> 8;
624 id ^= id >> 4;
625 return id & mask;
626}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
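
/* Editor's illustrative sketch: classful qdiscs embed
 * struct Qdisc_class_common in their per-class state (often in a
 * Qdisc_class_hash member named "clhash") and resolve classids with
 * qdisc_class_find(); "struct my_class" is hypothetical.
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	struct Qdisc_class_common *cc = qdisc_class_find(&q->clhash, classid);
 *
 *	return cc ? container_of(cc, struct my_class, common) : NULL;
 */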

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs of a device, starting at the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	u64_stats_add(&bstats->bytes, bytes);
	u64_stats_add(&bstats->packets, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };

	gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats);
	*qlen = qstats.qlen + qdisc_qlen(sch);
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
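
/* Editor's illustrative sketch: a shaper peeks at the head packet to
 * decide whether it may be sent now, and only commits the dequeue once it
 * can; "my_tokens_available" and "my_start_watchdog" are hypothetical.
 *
 *	skb = qdisc_peek_dequeued(sch);
 *	if (!skb)
 *		return NULL;
 *	if (my_tokens_available(q, qdisc_pkt_len(skb)))
 *		return qdisc_dequeue_peeked(sch);
 *	my_start_watchdog(q);
 *	return NULL;
 */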

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_purge_queue(old);
	sch_tree_unlock(sch);

	return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
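
/* Editor's illustrative sketch: an enqueue handler over its limit defers
 * the actual kfree_skb() by chaining the packet onto *to_free via
 * qdisc_drop(); __dev_xmit_skb() frees it after the root lock is dropped.
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free)
 *	{
 *		if (unlikely(qdisc_qlen(sch) >= sch->limit))
 *			return qdisc_drop(skb, sch, to_free);
 *		return qdisc_enqueue_tail(skb, sch);
 *	}
 */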

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
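
/* Worked example (editor's addition, illustrative numbers): with
 * cell_log = 3 and zero cell_align/overhead, a 100-byte packet yields
 * slot = 100 >> 3 = 12, so the transmit time is rtab->data[12]; once
 * slot exceeds 255 the result is extrapolated as
 * rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF].
 */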

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u16	mpu;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (len < r->mpu)
		len = r->mpu;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->mpu = r->mpu;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
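
/* Editor's illustrative sketch: a token-bucket style qdisc precomputes
 * the rate once at configure time and converts packet sizes to
 * nanoseconds on the fast path; "qopt" and "rate64" are hypothetical
 * netlink-derived inputs.
 *
 *	struct psched_ratecfg rate;
 *
 *	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
 *	...
 *	u64 ns = psched_l2t_ns(&rate, qdisc_pkt_len(skb));
 */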

struct psched_pktrate {
	u64	rate_pkts_ps; /* packets per second */
	u32	mult;
	u8	shift;
};

static inline u64 psched_pkt2t_ns(const struct psched_pktrate *r,
				  unsigned int pkt_num)
{
	return ((u64)pkt_num * r->mult) >> r->shift;
}

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64);

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct tcf_block *block;
	struct gnet_stats_basic_sync __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	unsigned long rcu_state;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block);
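
/* Editor's illustrative sketch: ingress/clsact keep two mini_Qdisc slots
 * and flip an RCU pointer between them whenever the filter chain changes,
 * so the fast path never blocks on configuration updates. The
 * "dev->miniq_ingress" destination mirrors the ingress qdisc's usage and
 * is assumed, not defined in this header.
 *
 *	struct mini_Qdisc_pair miniqp;
 *
 *	mini_qdisc_pair_init(&miniqp, sch, &dev->miniq_ingress);
 *	...
 *	mini_qdisc_pair_swap(&miniqp, tp_head);
 */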

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);

int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));

#endif