net: sched: introduce reference counting for tcf_proto
[linux-block.git] / include / net / pkt_cls.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef __NET_PKT_CLS_H
3#define __NET_PKT_CLS_H
4
5#include <linux/pkt_cls.h>
7aa0045d 6#include <linux/workqueue.h>
1da177e4
LT
7#include <net/sch_generic.h>
8#include <net/act_api.h>
8f256622 9#include <net/flow_offload.h>
1da177e4 10
cd11b164
PA
11/* TC action not accessible from user space */
12#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
13
1da177e4
LT
14/* Basic packet classifier frontend definitions. */
15
/* Walker state handed to a classifier's ->walk() callback.
 * NOTE(review): field semantics inferred from conventional usage — confirm:
 * skip/count appear to implement resumable iteration, stop aborts the walk,
 * cookie records the resume position, and fn is invoked per filter node.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
23
5c15257f
JP
24int register_tcf_proto_ops(struct tcf_proto_ops *ops);
25int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
1da177e4 26
8c4083b3
JP
27enum tcf_block_binder_type {
28 TCF_BLOCK_BINDER_TYPE_UNSPEC,
6e40cf2d
JP
29 TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
30 TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
8c4083b3
JP
31};
32
33struct tcf_block_ext_info {
34 enum tcf_block_binder_type binder_type;
c7eb7d72
JP
35 tcf_chain_head_change_t *chain_head_change;
36 void *chain_head_change_priv;
48617387 37 u32 block_index;
8c4083b3
JP
38};
39
acb67442 40struct tcf_block_cb;
aaa908ff 41bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
acb67442 42
8ae70032 43#ifdef CONFIG_NET_CLS
1f3ed383
JP
44struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
45 u32 chain_index);
1f3ed383 46void tcf_chain_put_by_act(struct tcf_chain *chain);
bbf73830
VB
47struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
48 struct tcf_chain *chain);
f36fe1c4 49void tcf_block_netif_keep_dst(struct tcf_block *block);
6529eaba 50int tcf_block_get(struct tcf_block **p_block,
8d1a77f9
AA
51 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
52 struct netlink_ext_ack *extack);
c7eb7d72 53int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
8d1a77f9
AA
54 struct tcf_block_ext_info *ei,
55 struct netlink_ext_ack *extack);
6529eaba 56void tcf_block_put(struct tcf_block *block);
c7eb7d72 57void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
8c4083b3 58 struct tcf_block_ext_info *ei);
44186460 59
48617387
JP
60static inline bool tcf_block_shared(struct tcf_block *block)
61{
62 return block->index;
63}
64
44186460
JP
65static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
66{
48617387 67 WARN_ON(tcf_block_shared(block));
44186460
JP
68 return block->q;
69}
70
acb67442
JP
71void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
72struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
73 tc_setup_cb_t *cb, void *cb_ident);
74void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
75unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
76struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
77 tc_setup_cb_t *cb, void *cb_ident,
60513bd8
JH
78 void *cb_priv,
79 struct netlink_ext_ack *extack);
acb67442
JP
80int tcf_block_cb_register(struct tcf_block *block,
81 tc_setup_cb_t *cb, void *cb_ident,
60513bd8 82 void *cb_priv, struct netlink_ext_ack *extack);
32636742
JH
83void __tcf_block_cb_unregister(struct tcf_block *block,
84 struct tcf_block_cb *block_cb);
acb67442
JP
85void tcf_block_cb_unregister(struct tcf_block *block,
86 tc_setup_cb_t *cb, void *cb_ident);
7f76fa36
JH
87int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
88 tc_indr_block_bind_cb_t *cb, void *cb_ident);
89int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
90 tc_indr_block_bind_cb_t *cb, void *cb_ident);
91void __tc_indr_block_cb_unregister(struct net_device *dev,
92 tc_indr_block_bind_cb_t *cb, void *cb_ident);
93void tc_indr_block_cb_unregister(struct net_device *dev,
94 tc_indr_block_bind_cb_t *cb, void *cb_ident);
acb67442 95
87d83093
JP
96int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
97 struct tcf_result *res, bool compat_mode);
98
8ae70032 99#else
6529eaba
JP
100static inline
101int tcf_block_get(struct tcf_block **p_block,
3c149091
SM
102 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
103 struct netlink_ext_ack *extack)
6529eaba
JP
104{
105 return 0;
106}
107
8c4083b3 108static inline
c7eb7d72 109int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
33c30a8b
QM
110 struct tcf_block_ext_info *ei,
111 struct netlink_ext_ack *extack)
8c4083b3
JP
112{
113 return 0;
114}
115
6529eaba 116static inline void tcf_block_put(struct tcf_block *block)
8ae70032
JP
117{
118}
87d83093 119
8c4083b3 120static inline
c7eb7d72 121void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
8c4083b3
JP
122 struct tcf_block_ext_info *ei)
123{
124}
125
44186460
JP
126static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
127{
128 return NULL;
129}
130
acb67442
JP
131static inline
132int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
133 void *cb_priv)
134{
135 return 0;
136}
137
138static inline
139void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
140 void *cb_priv)
141{
142}
143
144static inline
145void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
146{
147 return NULL;
148}
149
150static inline
151struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
152 tc_setup_cb_t *cb, void *cb_ident)
153{
154 return NULL;
155}
156
157static inline
158void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
159{
160}
161
162static inline
163unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
164{
165 return 0;
166}
167
168static inline
169struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
170 tc_setup_cb_t *cb, void *cb_ident,
60513bd8
JH
171 void *cb_priv,
172 struct netlink_ext_ack *extack)
acb67442
JP
173{
174 return NULL;
175}
176
177static inline
178int tcf_block_cb_register(struct tcf_block *block,
179 tc_setup_cb_t *cb, void *cb_ident,
60513bd8 180 void *cb_priv, struct netlink_ext_ack *extack)
acb67442
JP
181{
182 return 0;
183}
184
185static inline
32636742
JH
186void __tcf_block_cb_unregister(struct tcf_block *block,
187 struct tcf_block_cb *block_cb)
acb67442
JP
188{
189}
190
191static inline
192void tcf_block_cb_unregister(struct tcf_block *block,
193 tc_setup_cb_t *cb, void *cb_ident)
194{
195}
196
7f76fa36
JH
197static inline
198int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
199 tc_indr_block_bind_cb_t *cb, void *cb_ident)
200{
201 return 0;
202}
203
204static inline
205int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
206 tc_indr_block_bind_cb_t *cb, void *cb_ident)
207{
208 return 0;
209}
210
211static inline
212void __tc_indr_block_cb_unregister(struct net_device *dev,
213 tc_indr_block_bind_cb_t *cb, void *cb_ident)
214{
215}
216
217static inline
218void tc_indr_block_cb_unregister(struct net_device *dev,
219 tc_indr_block_bind_cb_t *cb, void *cb_ident)
220{
221}
222
87d83093
JP
223static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
224 struct tcf_result *res, bool compat_mode)
225{
226 return TC_ACT_UNSPEC;
227}
8ae70032 228#endif
cf1facda 229
1da177e4
LT
/* Atomically install @cl as the bound class and return the previous one. */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	prev = xchg(clp, cl);
	return prev;
}
235
/* Exchange the class bound in *@clp while holding the qdisc tree lock of
 * @q; returns the previously bound class.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	sch_tree_lock(q);
	prev = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return prev;
}
246
247static inline void
248tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
249{
34e3759c 250 struct Qdisc *q = tp->chain->block->q;
1da177e4
LT
251 unsigned long cl;
252
34e3759c
JP
253 /* Check q as it is not set for shared blocks. In that case,
254 * setting class is not supported.
255 */
256 if (!q)
257 return;
258 cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
259 cl = cls_set_class(q, &r->class, cl);
1da177e4 260 if (cl)
34e3759c 261 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
262}
263
264static inline void
265tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
266{
34e3759c 267 struct Qdisc *q = tp->chain->block->q;
1da177e4
LT
268 unsigned long cl;
269
34e3759c
JP
270 if (!q)
271 return;
1da177e4 272 if ((cl = __cls_set_class(&r->class, 0)) != 0)
34e3759c 273 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
274}
275
/* Per-filter action/extension state shared by all classifiers. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* number of valid entries in actions[] */
	struct tc_action **actions;	/* kcalloc'ed, TCA_ACT_MAX_PRIO slots
					 * (see tcf_exts_init())
					 */
	struct net *net;		/* netns ref, see tcf_exts_get_net() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
289
b9a24bb7 290static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
33be6271
WC
291{
292#ifdef CONFIG_NET_CLS_ACT
5da57f42 293 exts->type = 0;
22dc13c8 294 exts->nr_actions = 0;
e4b95c41 295 exts->net = NULL;
22dc13c8
WC
296 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
297 GFP_KERNEL);
b9a24bb7
WC
298 if (!exts->actions)
299 return -ENOMEM;
33be6271 300#endif
5da57f42
WC
301 exts->action = action;
302 exts->police = police;
b9a24bb7 303 return 0;
33be6271
WC
304}
305
e4b95c41
CW
306/* Return false if the netns is being destroyed in cleanup_net(). Callers
307 * need to do cleanup synchronously in this case, otherwise may race with
308 * tc_action_net_exit(). Return true for other cases.
309 */
310static inline bool tcf_exts_get_net(struct tcf_exts *exts)
311{
312#ifdef CONFIG_NET_CLS_ACT
313 exts->net = maybe_get_net(exts->net);
314 return exts->net != NULL;
315#else
316 return true;
317#endif
318}
319
/* Release the netns reference taken by tcf_exts_get_net(), if any. */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
327
22dc13c8 328#ifdef CONFIG_NET_CLS_ACT
244cd96a
CW
329#define tcf_exts_for_each_action(i, a, exts) \
330 for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
331#else
332#define tcf_exts_for_each_action(i, a, exts) \
191672ca 333 for (; 0; (void)(i), (void)(a), (void)(exts))
22dc13c8 334#endif
22dc13c8 335
d897a638
JK
336static inline void
337tcf_exts_stats_update(const struct tcf_exts *exts,
338 u64 bytes, u64 packets, u64 lastuse)
339{
340#ifdef CONFIG_NET_CLS_ACT
341 int i;
342
343 preempt_disable();
344
345 for (i = 0; i < exts->nr_actions; i++) {
346 struct tc_action *a = exts->actions[i];
347
28169aba 348 tcf_action_stats_update(a, bytes, packets, lastuse, true);
d897a638
JK
349 }
350
351 preempt_enable();
352#endif
353}
354
3bcc0cec
JP
355/**
356 * tcf_exts_has_actions - check if at least one action is present
357 * @exts: tc filter extensions handle
358 *
359 * Returns true if at least one action is present.
360 */
361static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
362{
2734437e 363#ifdef CONFIG_NET_CLS_ACT
3bcc0cec
JP
364 return exts->nr_actions;
365#else
366 return false;
367#endif
368}
2734437e 369
3bcc0cec
JP
370/**
371 * tcf_exts_has_one_action - check if exactly one action is present
372 * @exts: tc filter extensions handle
373 *
374 * Returns true if exactly one action is present.
375 */
376static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
377{
378#ifdef CONFIG_NET_CLS_ACT
379 return exts->nr_actions == 1;
380#else
381 return false;
382#endif
383}
2734437e 384
244cd96a
CW
385static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
386{
387#ifdef CONFIG_NET_CLS_ACT
388 return exts->actions[0];
389#else
390 return NULL;
391#endif
392}
393
af69afc5
JP
394/**
395 * tcf_exts_exec - execute tc filter extensions
396 * @skb: socket buffer
397 * @exts: tc filter extensions handle
398 * @res: desired result
399 *
af089e70 400 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
af69afc5
JP
401 * a negative number if the filter must be considered unmatched or
402 * a positive action code (TC_ACT_*) which must be returned to the
403 * underlying layer.
404 */
405static inline int
406tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
407 struct tcf_result *res)
408{
409#ifdef CONFIG_NET_CLS_ACT
ec1a9cca 410 return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
af69afc5 411#endif
af089e70 412 return TC_ACT_OK;
af69afc5
JP
413}
414
5c15257f
JP
415int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
416 struct nlattr **tb, struct nlattr *rate_tlv,
50a56190
AA
417 struct tcf_exts *exts, bool ovr,
418 struct netlink_ext_ack *extack);
18d0264f 419void tcf_exts_destroy(struct tcf_exts *exts);
9b0d4446 420void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
5da57f42
WC
421int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
422int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
1da177e4
LT
423
424/**
425 * struct tcf_pkt_info - packet information
426 */
fd2c3ef7 427struct tcf_pkt_info {
1da177e4
LT
428 unsigned char * ptr;
429 int nexthdr;
430};
431
432#ifdef CONFIG_NET_EMATCH
433
434struct tcf_ematch_ops;
435
436/**
437 * struct tcf_ematch - extended match (ematch)
438 *
439 * @matchid: identifier to allow userspace to reidentify a match
440 * @flags: flags specifying attributes and the relation to other matches
441 * @ops: the operations lookup table of the corresponding ematch module
442 * @datalen: length of the ematch specific configuration data
443 * @data: ematch specific data
444 */
fd2c3ef7 445struct tcf_ematch {
1da177e4
LT
446 struct tcf_ematch_ops * ops;
447 unsigned long data;
448 unsigned int datalen;
449 u16 matchid;
450 u16 flags;
82a470f1 451 struct net *net;
1da177e4
LT
452};
453
454static inline int tcf_em_is_container(struct tcf_ematch *em)
455{
456 return !em->ops;
457}
458
459static inline int tcf_em_is_simple(struct tcf_ematch *em)
460{
461 return em->flags & TCF_EM_SIMPLE;
462}
463
464static inline int tcf_em_is_inverted(struct tcf_ematch *em)
465{
466 return em->flags & TCF_EM_INVERT;
467}
468
469static inline int tcf_em_last_match(struct tcf_ematch *em)
470{
471 return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
472}
473
474static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
475{
476 if (tcf_em_last_match(em))
477 return 1;
478
479 if (result == 0 && em->flags & TCF_EM_REL_AND)
480 return 1;
481
482 if (result != 0 && em->flags & TCF_EM_REL_OR)
483 return 1;
484
485 return 0;
486}
487
488/**
489 * struct tcf_ematch_tree - ematch tree handle
490 *
491 * @hdr: ematch tree header supplied by userspace
492 * @matches: array of ematches
493 */
fd2c3ef7 494struct tcf_ematch_tree {
1da177e4
LT
495 struct tcf_ematch_tree_hdr hdr;
496 struct tcf_ematch * matches;
497
498};
499
500/**
501 * struct tcf_ematch_ops - ematch module operations
502 *
503 * @kind: identifier (kind) of this ematch module
504 * @datalen: length of expected configuration data (optional)
505 * @change: called during validation (optional)
506 * @match: called during ematch tree evaluation, must return 1/0
507 * @destroy: called during destroyage (optional)
508 * @dump: called during dumping process (optional)
509 * @owner: owner, must be set to THIS_MODULE
510 * @link: link to previous/next ematch module (internal use)
511 */
fd2c3ef7 512struct tcf_ematch_ops {
1da177e4
LT
513 int kind;
514 int datalen;
82a470f1 515 int (*change)(struct net *net, void *,
1da177e4
LT
516 int, struct tcf_ematch *);
517 int (*match)(struct sk_buff *, struct tcf_ematch *,
518 struct tcf_pkt_info *);
82a470f1 519 void (*destroy)(struct tcf_ematch *);
1da177e4
LT
520 int (*dump)(struct sk_buff *, struct tcf_ematch *);
521 struct module *owner;
522 struct list_head link;
523};
524
5c15257f
JP
525int tcf_em_register(struct tcf_ematch_ops *);
526void tcf_em_unregister(struct tcf_ematch_ops *);
527int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
528 struct tcf_ematch_tree *);
82a470f1 529void tcf_em_tree_destroy(struct tcf_ematch_tree *);
5c15257f
JP
530int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
531int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
532 struct tcf_pkt_info *);
1da177e4 533
1da177e4
LT
534/**
535 * tcf_em_tree_match - evaulate an ematch tree
536 *
537 * @skb: socket buffer of the packet in question
538 * @tree: ematch tree to be used for evaluation
539 * @info: packet information examined by classifier
540 *
541 * This function matches @skb against the ematch tree in @tree by going
542 * through all ematches respecting their logic relations returning
543 * as soon as the result is obvious.
544 *
545 * Returns 1 if the ematch tree as-one matches, no ematches are configured
546 * or ematch is not enabled in the kernel, otherwise 0 is returned.
547 */
548static inline int tcf_em_tree_match(struct sk_buff *skb,
549 struct tcf_ematch_tree *tree,
550 struct tcf_pkt_info *info)
551{
552 if (tree->hdr.nmatches)
553 return __tcf_em_tree_match(skb, tree, info);
554 else
555 return 1;
556}
557
db3d99c0
PM
558#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
559
1da177e4
LT
560#else /* CONFIG_NET_EMATCH */
561
fd2c3ef7 562struct tcf_ematch_tree {
1da177e4
LT
563};
564
565#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
82a470f1 566#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
1da177e4 567#define tcf_em_tree_dump(skb, t, tlv) (0)
1da177e4
LT
568#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
569
570#endif /* CONFIG_NET_EMATCH */
571
572static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
573{
574 switch (layer) {
575 case TCF_LAYER_LINK:
d3303a65 576 return skb_mac_header(skb);
1da177e4 577 case TCF_LAYER_NETWORK:
d56f90a7 578 return skb_network_header(skb);
1da177e4 579 case TCF_LAYER_TRANSPORT:
9c70220b 580 return skb_transport_header(skb);
1da177e4
LT
581 }
582
583 return NULL;
584}
585
eddc9ec5
ACM
586static inline int tcf_valid_offset(const struct sk_buff *skb,
587 const unsigned char *ptr, const int len)
1da177e4 588{
da521b2c
DM
589 return likely((ptr + len) <= skb_tail_pointer(skb) &&
590 ptr >= skb->head &&
591 (ptr <= (ptr + len)));
1da177e4
LT
592}
593
594#ifdef CONFIG_NET_CLS_IND
0eeb8ffc
DL
595#include <net/net_namespace.h>
596
1da177e4 597static inline int
1057c55f
AA
598tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
599 struct netlink_ext_ack *extack)
1da177e4 600{
2519a602
WC
601 char indev[IFNAMSIZ];
602 struct net_device *dev;
603
1057c55f
AA
604 if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
605 NL_SET_ERR_MSG(extack, "Interface name too long");
1da177e4 606 return -EINVAL;
1057c55f 607 }
2519a602
WC
608 dev = __dev_get_by_name(net, indev);
609 if (!dev)
610 return -ENODEV;
611 return dev->ifindex;
1da177e4
LT
612}
613
2519a602
WC
614static inline bool
615tcf_match_indev(struct sk_buff *skb, int ifindex)
1da177e4 616{
2519a602
WC
617 if (!ifindex)
618 return true;
619 if (!skb->skb_iif)
620 return false;
621 return ifindex == skb->skb_iif;
1da177e4
LT
622}
623#endif /* CONFIG_NET_CLS_IND */
624
3a7b6861
PNA
625int tc_setup_flow_action(struct flow_action *flow_action,
626 const struct tcf_exts *exts);
aeb3fecd
CW
627int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
628 void *type_data, bool err_stop);
e3ab786b 629unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
717503b9 630
8c4083b3
JP
631enum tc_block_command {
632 TC_BLOCK_BIND,
633 TC_BLOCK_UNBIND,
634};
635
636struct tc_block_offload {
637 enum tc_block_command command;
638 enum tcf_block_binder_type binder_type;
639 struct tcf_block *block;
60513bd8 640 struct netlink_ext_ack *extack;
8c4083b3
JP
641};
642
5fd9fc4e 643struct tc_cls_common_offload {
5fd9fc4e
JP
644 u32 chain_index;
645 __be16 protocol;
d7c1c8d2 646 u32 prio;
8f0b425a 647 struct netlink_ext_ack *extack;
5fd9fc4e
JP
648};
649
a1b7c5fd
JF
650struct tc_cls_u32_knode {
651 struct tcf_exts *exts;
068ceb35 652 struct tcf_result *res;
e014860e 653 struct tc_u32_sel *sel;
a1b7c5fd
JF
654 u32 handle;
655 u32 val;
656 u32 mask;
657 u32 link_handle;
e014860e 658 u8 fshift;
a1b7c5fd
JF
659};
660
661struct tc_cls_u32_hnode {
662 u32 handle;
663 u32 prio;
664 unsigned int divisor;
665};
666
667enum tc_clsu32_command {
668 TC_CLSU32_NEW_KNODE,
669 TC_CLSU32_REPLACE_KNODE,
670 TC_CLSU32_DELETE_KNODE,
671 TC_CLSU32_NEW_HNODE,
672 TC_CLSU32_REPLACE_HNODE,
673 TC_CLSU32_DELETE_HNODE,
674};
675
676struct tc_cls_u32_offload {
5fd9fc4e 677 struct tc_cls_common_offload common;
a1b7c5fd
JF
678 /* knode values */
679 enum tc_clsu32_command command;
680 union {
681 struct tc_cls_u32_knode knode;
682 struct tc_cls_u32_hnode hnode;
683 };
684};
685
7b06e8ae 686static inline bool tc_can_offload(const struct net_device *dev)
6843e7a2 687{
70b5aee4 688 return dev->features & NETIF_F_HW_TC;
6843e7a2
JF
689}
690
f9eda14f
QM
691static inline bool tc_can_offload_extack(const struct net_device *dev,
692 struct netlink_ext_ack *extack)
693{
694 bool can = tc_can_offload(dev);
695
696 if (!can)
697 NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
698
699 return can;
700}
701
878db9f0
JK
702static inline bool
703tc_cls_can_offload_and_chain0(const struct net_device *dev,
704 struct tc_cls_common_offload *common)
705{
706 if (!tc_can_offload_extack(dev, common->extack))
707 return false;
708 if (common->chain_index) {
709 NL_SET_ERR_MSG(common->extack,
710 "Driver supports only offload of chain 0");
711 return false;
712 }
713 return true;
714}
715
55330f05
HHZ
716static inline bool tc_skip_hw(u32 flags)
717{
718 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
719}
720
d34e3e18
SS
721static inline bool tc_skip_sw(u32 flags)
722{
723 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
724}
725
726/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
727static inline bool tc_flags_valid(u32 flags)
728{
81c7288b
MRL
729 if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
730 TCA_CLS_FLAGS_VERBOSE))
d34e3e18
SS
731 return false;
732
81c7288b 733 flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
d34e3e18
SS
734 if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
735 return false;
736
737 return true;
738}
739
e696028a
OG
740static inline bool tc_in_hw(u32 flags)
741{
742 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
743}
744
34832e1c
JK
745static inline void
746tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
747 const struct tcf_proto *tp, u32 flags,
748 struct netlink_ext_ack *extack)
749{
750 cls_common->chain_index = tp->chain->index;
751 cls_common->protocol = tp->protocol;
752 cls_common->prio = tp->prio;
81c7288b 753 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
34832e1c
JK
754 cls_common->extack = extack;
755}
756
5b33f488
AV
757enum tc_fl_command {
758 TC_CLSFLOWER_REPLACE,
759 TC_CLSFLOWER_DESTROY,
10cbc684 760 TC_CLSFLOWER_STATS,
34738452
JP
761 TC_CLSFLOWER_TMPLT_CREATE,
762 TC_CLSFLOWER_TMPLT_DESTROY,
5b33f488
AV
763};
764
765struct tc_cls_flower_offload {
5fd9fc4e 766 struct tc_cls_common_offload common;
5b33f488 767 enum tc_fl_command command;
8208d21b 768 unsigned long cookie;
8f256622 769 struct flow_rule *rule;
3b1903ef 770 struct flow_stats stats;
384c181e 771 u32 classid;
5b33f488
AV
772};
773
8f256622
PNA
774static inline struct flow_rule *
775tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
776{
777 return tc_flow_cmd->rule;
778}
779
b87f7936
YG
780enum tc_matchall_command {
781 TC_CLSMATCHALL_REPLACE,
782 TC_CLSMATCHALL_DESTROY,
783};
784
785struct tc_cls_matchall_offload {
5fd9fc4e 786 struct tc_cls_common_offload common;
b87f7936
YG
787 enum tc_matchall_command command;
788 struct tcf_exts *exts;
789 unsigned long cookie;
790};
791
332ae8e2 792enum tc_clsbpf_command {
102740bd 793 TC_CLSBPF_OFFLOAD,
68d64063 794 TC_CLSBPF_STATS,
332ae8e2
JK
795};
796
797struct tc_cls_bpf_offload {
5fd9fc4e 798 struct tc_cls_common_offload common;
332ae8e2
JK
799 enum tc_clsbpf_command command;
800 struct tcf_exts *exts;
801 struct bpf_prog *prog;
102740bd 802 struct bpf_prog *oldprog;
332ae8e2
JK
803 const char *name;
804 bool exts_integrated;
805};
806
4e8b86c0
AN
807struct tc_mqprio_qopt_offload {
808 /* struct tc_mqprio_qopt must always be the first element */
809 struct tc_mqprio_qopt qopt;
810 u16 mode;
811 u16 shaper;
812 u32 flags;
813 u64 min_rate[TC_QOPT_MAX_QUEUE];
814 u64 max_rate[TC_QOPT_MAX_QUEUE];
815};
1045ba77
JHS
816
817/* This structure holds cookie structure that is passed from user
818 * to the kernel for actions and classifiers
819 */
820struct tc_cookie {
821 u8 *data;
822 u32 len;
eec94fdb 823 struct rcu_head rcu;
1045ba77 824};
602f3baf 825
f34b4aac
NF
826struct tc_qopt_offload_stats {
827 struct gnet_stats_basic_packed *bstats;
828 struct gnet_stats_queue *qstats;
829};
830
f971b132
JK
831enum tc_mq_command {
832 TC_MQ_CREATE,
833 TC_MQ_DESTROY,
47c669a4 834 TC_MQ_STATS,
d577a3d2
JK
835 TC_MQ_GRAFT,
836};
837
838struct tc_mq_opt_offload_graft_params {
839 unsigned long queue;
840 u32 child_handle;
f971b132
JK
841};
842
843struct tc_mq_qopt_offload {
844 enum tc_mq_command command;
845 u32 handle;
d577a3d2
JK
846 union {
847 struct tc_qopt_offload_stats stats;
848 struct tc_mq_opt_offload_graft_params graft_params;
849 };
f971b132
JK
850};
851
602f3baf
NF
852enum tc_red_command {
853 TC_RED_REPLACE,
854 TC_RED_DESTROY,
855 TC_RED_STATS,
856 TC_RED_XSTATS,
bf2a752b 857 TC_RED_GRAFT,
602f3baf
NF
858};
859
860struct tc_red_qopt_offload_params {
861 u32 min;
862 u32 max;
863 u32 probability;
c0b7490b 864 u32 limit;
602f3baf 865 bool is_ecn;
190852a5 866 bool is_harddrop;
416ef9b1 867 struct gnet_stats_queue *qstats;
602f3baf 868};
602f3baf
NF
869
870struct tc_red_qopt_offload {
871 enum tc_red_command command;
872 u32 handle;
873 u32 parent;
874 union {
875 struct tc_red_qopt_offload_params set;
f34b4aac 876 struct tc_qopt_offload_stats stats;
602f3baf 877 struct red_stats *xstats;
bf2a752b 878 u32 child_handle;
602f3baf
NF
879 };
880};
881
890d8d23
JK
882enum tc_gred_command {
883 TC_GRED_REPLACE,
884 TC_GRED_DESTROY,
e49efd52 885 TC_GRED_STATS,
890d8d23
JK
886};
887
888struct tc_gred_vq_qopt_offload_params {
889 bool present;
890 u32 limit;
891 u32 prio;
892 u32 min;
893 u32 max;
894 bool is_ecn;
895 bool is_harddrop;
896 u32 probability;
897 /* Only need backlog, see struct tc_prio_qopt_offload_params */
898 u32 *backlog;
899};
900
901struct tc_gred_qopt_offload_params {
902 bool grio_on;
903 bool wred_on;
904 unsigned int dp_cnt;
905 unsigned int dp_def;
906 struct gnet_stats_queue *qstats;
907 struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
908};
909
e49efd52
JK
910struct tc_gred_qopt_offload_stats {
911 struct gnet_stats_basic_packed bstats[MAX_DPs];
912 struct gnet_stats_queue qstats[MAX_DPs];
913 struct red_stats *xstats[MAX_DPs];
914};
915
890d8d23
JK
916struct tc_gred_qopt_offload {
917 enum tc_gred_command command;
918 u32 handle;
919 u32 parent;
920 union {
921 struct tc_gred_qopt_offload_params set;
e49efd52 922 struct tc_gred_qopt_offload_stats stats;
890d8d23
JK
923 };
924};
925
7fdb61b4
NF
926enum tc_prio_command {
927 TC_PRIO_REPLACE,
928 TC_PRIO_DESTROY,
929 TC_PRIO_STATS,
b9c7a7ac 930 TC_PRIO_GRAFT,
7fdb61b4
NF
931};
932
933struct tc_prio_qopt_offload_params {
934 int bands;
935 u8 priomap[TC_PRIO_MAX + 1];
936 /* In case that a prio qdisc is offloaded and now is changed to a
937 * non-offloadedable config, it needs to update the backlog & qlen
938 * values to negate the HW backlog & qlen values (and only them).
939 */
940 struct gnet_stats_queue *qstats;
941};
942
b9c7a7ac
NF
943struct tc_prio_qopt_offload_graft_params {
944 u8 band;
945 u32 child_handle;
946};
947
7fdb61b4
NF
948struct tc_prio_qopt_offload {
949 enum tc_prio_command command;
950 u32 handle;
951 u32 parent;
952 union {
953 struct tc_prio_qopt_offload_params replace_params;
954 struct tc_qopt_offload_stats stats;
b9c7a7ac 955 struct tc_prio_qopt_offload_graft_params graft_params;
7fdb61b4
NF
956 };
957};
b9c7a7ac 958
98b0e5f6
JK
959enum tc_root_command {
960 TC_ROOT_GRAFT,
961};
962
963struct tc_root_qopt_offload {
964 enum tc_root_command command;
965 u32 handle;
966 bool ingress;
967};
968
1da177e4 969#endif