net: sched: sch: add extack for change qdisc ops
[linux-block.git] / include / net / sch_generic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32 data[256];
        struct qdisc_rate_table *next;
        int refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
        struct rcu_head rcu;
        struct list_head list;
        struct tc_sizespec szopts;
        int refcnt;
        u16 data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff *head;
        struct sk_buff *tail;
        __u32 qlen;
        spinlock_t lock;
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue : It can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing the next packet.
                                      * It's true for MQ/MQPRIO slaves, or for
                                      * a non-multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy :
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK            0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED         0x200 /* qdisc is offloaded to HW */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node       hash;
        u32                     handle;
        u32                     parent;

        struct netdev_queue     *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;

        /*
         * For performance's sake on SMP, we put the most frequently
         * modified fields at the end.
         */
        struct sk_buff_head     gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_packed bstats;
        seqcount_t              running;
        struct gnet_stats_queue qstats;
        unsigned long           state;
        struct Qdisc            *next_sched;
        struct sk_buff_head     skb_bad_txq;
        int                     padded;
        refcount_t              refcnt;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        /* Variant of write_seqcount_begin() telling lockdep a trylock
         * was attempted.
         */
        raw_write_seqcount_begin(&qdisc->running);
        seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        write_seqcount_end(&qdisc->running);
}
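
/* How the trylock pair above is used on the transmit path (cf. qdisc_run()
 * in include/net/pkt_sched.h): only one CPU runs a given qdisc at a time;
 * contenders see qdisc_run_begin() fail and back off instead of spinning.
 *
 *      if (qdisc_run_begin(q)) {
 *              __qdisc_run(q);
 *              qdisc_run_end(q);
 *      }
 */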

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Non-BQL migrated drivers will return 0, too. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}

struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                         struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
                                          struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
                                             unsigned long arg);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                            u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg *);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                              struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;
        unsigned int            static_flags;

        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);

        int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
                                        struct netlink_ext_ack *extack);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *sch,
                                          struct nlattr *arg,
                                          struct netlink_ext_ack *extack);
        void                    (*attach)(struct Qdisc *sch);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};
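
/* Hypothetical Qdisc_ops sketch showing the extack-enabled ->init()/->change()
 * signatures this patch series introduces; every "example_*" name below is
 * illustrative, not a real kernel symbol:
 *
 *      static int example_change(struct Qdisc *sch, struct nlattr *opt,
 *                                struct netlink_ext_ack *extack)
 *      {
 *              if (!opt) {
 *                      NL_SET_ERR_MSG(extack, "example: options are required");
 *                      return -EINVAL;
 *              }
 *              return 0;
 *      }
 *
 *      static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *              .id      = "example",
 *              .enqueue = example_enqueue,
 *              .dequeue = example_dequeue,
 *              .init    = example_init,
 *              .change  = example_change,
 *              .owner   = THIS_MODULE,
 *      };
 */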

struct tcf_result {
        union {
                struct {
                        unsigned long   class;
                        u32             classid;
                };
                const struct tcf_proto *goto_tp;
        };
};

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto *);
        void                    (*destroy)(struct tcf_proto *);

        void *                  (*get)(struct tcf_proto *, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
                                          struct tcf_proto *, unsigned long,
                                          u32 handle, struct nlattr **,
                                          void **, bool);
        int                     (*delete)(struct tcf_proto *, void *, bool *);
        void                    (*walk)(struct tcf_proto *, struct tcf_walker *arg);
        void                    (*bind_class)(void *, u32, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct net *, struct tcf_proto *, void *,
                                        struct sk_buff *skb, struct tcmsg *);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        const struct tcf_proto_ops      *ops;
        struct tcf_chain        *chain;
        struct rcu_head         rcu;
};

struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     tc_classid;
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
        struct tcf_proto __rcu *filter_chain;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
        struct list_head list;
        struct tcf_block *block;
        u32 index; /* chain index */
        unsigned int refcnt;
};

struct tcf_block {
        struct list_head chain_list;
        struct net *net;
        struct Qdisc *q;
        struct list_head cb_list;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
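
/* Intended use (modelled on qdiscs such as netem; the "example" names are
 * illustrative): a qdisc keeps its private per-packet state inside the
 * qdisc_skb_cb data area, validating the size at compile time.
 *
 *      struct example_skb_cb {
 *              u64 time_to_send;
 *      };
 *
 *      static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *      {
 *              qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *              return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *      }
 */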

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
        return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
        __u32 qlen = 0;
        int i;

        if (q->flags & TCQ_F_NOLOCK) {
                for_each_possible_cpu(i)
                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
        } else {
                qlen = q->q.qlen;
        }

        return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
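
/* Typical (sketched) use in a qdisc's ->change() op, serializing a
 * parameter update against concurrent enqueue/dequeue; "q" stands for the
 * qdisc's illustrative private data:
 *
 *      sch_tree_lock(sch);
 *      q->limit = new_limit;
 *      sch_tree_unlock(sch);
 */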

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        if (!id)
                return NULL;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
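
/* How a classful qdisc typically resolves a classid (cf. hfsc; the
 * "example" names are illustrative): Qdisc_class_common is embedded in the
 * per-class state and the outer structure is recovered via container_of().
 *
 *      struct example_class {
 *              struct Qdisc_class_common common;
 *              // per-class scheduling state...
 *      };
 *
 *      static struct example_class *example_find(struct Qdisc *sch, u32 classid)
 *      {
 *              struct example_sched_data *q = qdisc_priv(sch);
 *              struct Qdisc_class_common *clc;
 *
 *              clc = qdisc_class_find(&q->clhash, classid);
 *              return clc ? container_of(clc, struct example_class, common) : NULL;
 *      }
 */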

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
        u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

        return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (q->q.qlen) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                  __u64 bytes, __u32 packets)
{
        bstats->bytes += bytes;
        bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                      __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        _bstats_update(&bstats->bstats, bytes, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                     const struct sk_buff *skb)
{
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
                                                const struct sk_buff *skb)
{
        this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
        this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
        qh->head = NULL;
        qh->tail = NULL;
        qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}
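
/* Caller side of the to_free contract (simplified from __dev_xmit_skb();
 * a sketch, not the exact code): drops collect on a local list while the
 * root lock is held and are freed in one batch after it is released.
 *
 *      struct sk_buff *to_free = NULL;
 *      int rc;
 *
 *      spin_lock(root_lock);
 *      rc = qdisc_enqueue(skb, q, &to_free);
 *      spin_unlock(root_lock);
 *      if (unlikely(to_free))
 *              kfree_skb_list(to_free);
 */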

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
                                                 struct sk_buff **to_free)
{
        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!skb) {
                skb = sch->dequeue(sch);

                if (skb) {
                        __skb_queue_head(&sch->gso_skb, skb);
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, skb);
                        sch->q.qlen++;
                }
        }

        return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = skb_peek(&sch->gso_skb);

        if (skb) {
                skb = __skb_dequeue(&sch->gso_skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
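
/* Sketch (modelled on shapers such as tbf) of how the pseudo-peek pair is
 * meant to be used together: peek to learn the head packet's size, and only
 * dequeue once the rate check says it may be sent; "enough_tokens" is an
 * illustrative stand-in for the qdisc's own accounting.
 *
 *      skb = q->qdisc->ops->peek(q->qdisc);
 *      if (skb && enough_tokens(q, qdisc_pkt_len(skb)))
 *              skb = qdisc_dequeue_peeked(q->qdisc);
 */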

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /*
         * We do not know the backlog in bytes of this list, it
         * is up to the caller to correct it
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
        sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL) {
                unsigned int qlen = old->q.qlen;
                unsigned int backlog = old->qstats.backlog;

                qdisc_reset(old);
                qdisc_tree_reduce_backlog(old, qlen, backlog);
        }
        sch_tree_unlock(sch);

        return old;
}

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_cpu_drop(sch);

        return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
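
/* Worked example with illustrative parameters: for cell_log = 3 and zero
 * cell_align/overhead, a 1000-byte packet maps to slot 1000 >> 3 = 125, so
 * rtab->data[125] holds its precomputed transmit time. Slots above 255 are
 * extrapolated from the top bucket, scaled by slot >> 8.
 */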

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}
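
/* psched_ratecfg_precompute() chooses (mult, shift) so that
 * len * mult >> shift approximates len / rate in nanoseconds, avoiding a
 * 64-bit division per packet. Illustrative numbers: at rate_bytes_ps =
 * 125000000 (1 Gbit/s), a 1500-byte frame costs 1500 / 125e6 s = 12 usec,
 * which the mult/shift pair reproduces to within rounding.
 */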

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* legacy struct tc_ratespec has a 32bit @rate field
         * Qdisc using 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
        struct tcf_proto *filter_list;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
                                                const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
        this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
        struct mini_Qdisc miniq1;
        struct mini_Qdisc miniq2;
        struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
                          struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                          struct mini_Qdisc __rcu **p_miniq);

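/* Sketch of the intended pairing (cf. the ingress/clsact qdiscs): the owner
 * embeds a mini_Qdisc_pair, points it at the RCU pointer the fast path
 * dereferences, and swaps the active mini qdisc from its
 * tcf_chain_head_change_t callback whenever the filter chain head changes.
 *
 *      mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *      ...
 *      static void example_chain_head_change(struct tcf_proto *tp_head,
 *                                            void *priv)
 *      {
 *              mini_qdisc_pair_swap(priv, tp_head);
 *      }
 */
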
#endif