Merge branch 'for-5.3/upstream-fixes' into for-linus
[linux-2.6-block.git] / include / net / pkt_cls.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef __NET_PKT_CLS_H
3#define __NET_PKT_CLS_H
4
5#include <linux/pkt_cls.h>
7aa0045d 6#include <linux/workqueue.h>
1da177e4
LT
7#include <net/sch_generic.h>
8#include <net/act_api.h>
a5148626 9#include <net/net_namespace.h>
1da177e4 10
cd11b164 11/* TC action not accessible from user space */
720f22fe 12#define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
cd11b164 13
1da177e4
LT
14/* Basic packet classifier frontend definitions. */
15
/* Walker state handed to a classifier's ->walk() callback; ->fn() is
 * invoked once per filter.
 */
struct tcf_walker {
	int	stop;		/* set non-zero to abort the walk */
	int	skip;		/* number of leading entries to skip */
	int	count;		/* entries visited so far */
	bool	nonempty;	/* set once at least one entry was seen */
	unsigned long cookie;	/* opaque resume position (dump restart) — see callers */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
24
5c15257f
JP
25int register_tcf_proto_ops(struct tcf_proto_ops *ops);
26int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
1da177e4 27
/* Parameters for extended block binding (tcf_block_get_ext()). */
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	/* Callback (plus its private data) run when the head of the filter
	 * chain changes.
	 */
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;	/* non-zero selects/creates a shared block */
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
acb67442 37
8ae70032 38#ifdef CONFIG_NET_CLS
1f3ed383
JP
39struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
40 u32 chain_index);
1f3ed383 41void tcf_chain_put_by_act(struct tcf_chain *chain);
bbf73830
VB
42struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
43 struct tcf_chain *chain);
fe2923af 44struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
12db03b6 45 struct tcf_proto *tp, bool rtnl_held);
f36fe1c4 46void tcf_block_netif_keep_dst(struct tcf_block *block);
6529eaba 47int tcf_block_get(struct tcf_block **p_block,
8d1a77f9
AA
48 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
49 struct netlink_ext_ack *extack);
c7eb7d72 50int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
8d1a77f9
AA
51 struct tcf_block_ext_info *ei,
52 struct netlink_ext_ack *extack);
6529eaba 53void tcf_block_put(struct tcf_block *block);
c7eb7d72 54void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
8c4083b3 55 struct tcf_block_ext_info *ei);
44186460 56
48617387
JP
57static inline bool tcf_block_shared(struct tcf_block *block)
58{
59 return block->index;
60}
61
c1a970d0
VB
62static inline bool tcf_block_non_null_shared(struct tcf_block *block)
63{
64 return block && block->index;
65}
66
44186460
JP
67static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
68{
48617387 69 WARN_ON(tcf_block_shared(block));
44186460
JP
70 return block->q;
71}
72
7f76fa36
JH
73int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
74 tc_indr_block_bind_cb_t *cb, void *cb_ident);
75int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
76 tc_indr_block_bind_cb_t *cb, void *cb_ident);
77void __tc_indr_block_cb_unregister(struct net_device *dev,
78 tc_indr_block_bind_cb_t *cb, void *cb_ident);
79void tc_indr_block_cb_unregister(struct net_device *dev,
80 tc_indr_block_bind_cb_t *cb, void *cb_ident);
acb67442 81
87d83093
JP
82int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
83 struct tcf_result *res, bool compat_mode);
84
8ae70032 85#else
88c44a52
PJV
/* !CONFIG_NET_CLS: no-op fallbacks so callers compile unchanged.
 * Getters succeed with 0/NULL, predicates report "no", and
 * tcf_classify() reports TC_ACT_UNSPEC (no classification result).
 */
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
8ae70032 171#endif
cf1facda 172
1da177e4
LT
/* Atomically swap the class bound to a filter result, returning the
 * previous binding.  Lockless; use cls_set_class() when the qdisc tree
 * lock must be held around the exchange.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
178
/* Swap the bound class under the qdisc tree lock and return the old one. */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	sch_tree_lock(q);
	prev = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return prev;
}
189
190static inline void
191tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
192{
34e3759c 193 struct Qdisc *q = tp->chain->block->q;
1da177e4
LT
194 unsigned long cl;
195
34e3759c
JP
196 /* Check q as it is not set for shared blocks. In that case,
197 * setting class is not supported.
198 */
199 if (!q)
200 return;
201 cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
202 cl = cls_set_class(q, &r->class, cl);
1da177e4 203 if (cl)
34e3759c 204 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
205}
206
207static inline void
208tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
209{
34e3759c 210 struct Qdisc *q = tp->chain->block->q;
1da177e4
LT
211 unsigned long cl;
212
34e3759c
JP
213 if (!q)
214 return;
1da177e4 215 if ((cl = __cls_set_class(&r->class, 0)) != 0)
34e3759c 216 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
217}
218
/* Actions and related state attached to a classifier instance. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* number of valid entries in ->actions */
	struct tc_action **actions;	/* kcalloc'ed, TCA_ACT_MAX_PRIO slots */
	struct net *net;		/* netns the exts were created in */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
232
14215108
CW
233static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
234 int action, int police)
33be6271
WC
235{
236#ifdef CONFIG_NET_CLS_ACT
5da57f42 237 exts->type = 0;
22dc13c8 238 exts->nr_actions = 0;
14215108 239 exts->net = net;
22dc13c8
WC
240 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
241 GFP_KERNEL);
b9a24bb7
WC
242 if (!exts->actions)
243 return -ENOMEM;
33be6271 244#endif
5da57f42
WC
245 exts->action = action;
246 exts->police = police;
b9a24bb7 247 return 0;
33be6271
WC
248}
249
e4b95c41
CW
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

/* Release the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
271
#ifdef CONFIG_NET_CLS_ACT
/* Iterate over the actions of @exts; stops at the first NULL slot. */
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
/* Without CONFIG_NET_CLS_ACT the loop body never runs; the (void) casts
 * keep the arguments "used" to avoid unused-variable warnings.
 */
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
22dc13c8 279
d897a638
JK
280static inline void
281tcf_exts_stats_update(const struct tcf_exts *exts,
282 u64 bytes, u64 packets, u64 lastuse)
283{
284#ifdef CONFIG_NET_CLS_ACT
285 int i;
286
287 preempt_disable();
288
289 for (i = 0; i < exts->nr_actions; i++) {
290 struct tc_action *a = exts->actions[i];
291
28169aba 292 tcf_action_stats_update(a, bytes, packets, lastuse, true);
d897a638
JK
293 }
294
295 preempt_enable();
296#endif
297}
298
3bcc0cec
JP
299/**
300 * tcf_exts_has_actions - check if at least one action is present
301 * @exts: tc filter extensions handle
302 *
303 * Returns true if at least one action is present.
304 */
305static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
306{
2734437e 307#ifdef CONFIG_NET_CLS_ACT
3bcc0cec
JP
308 return exts->nr_actions;
309#else
310 return false;
311#endif
312}
2734437e 313
af69afc5
JP
314/**
315 * tcf_exts_exec - execute tc filter extensions
316 * @skb: socket buffer
317 * @exts: tc filter extensions handle
318 * @res: desired result
319 *
af089e70 320 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
af69afc5
JP
321 * a negative number if the filter must be considered unmatched or
322 * a positive action code (TC_ACT_*) which must be returned to the
323 * underlying layer.
324 */
325static inline int
326tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
327 struct tcf_result *res)
328{
329#ifdef CONFIG_NET_CLS_ACT
ec1a9cca 330 return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
af69afc5 331#endif
af089e70 332 return TC_ACT_OK;
af69afc5
JP
333}
334
5c15257f
JP
335int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
336 struct nlattr **tb, struct nlattr *rate_tlv,
ec6743a1 337 struct tcf_exts *exts, bool ovr, bool rtnl_held,
50a56190 338 struct netlink_ext_ack *extack);
18d0264f 339void tcf_exts_destroy(struct tcf_exts *exts);
9b0d4446 340void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
5da57f42
WC
341int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
342int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
1da177e4
LT
343
/**
 * struct tcf_pkt_info - packet information
 * @ptr: current inspection position within the packet data
 * @nexthdr: next-header state used during matching (semantics defined by
 *	the ematch users of this struct — verify against callers)
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
351
352#ifdef CONFIG_NET_EMATCH
353
354struct tcf_ematch_ops;
355
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace associated with the match (passed to ->change())
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};
373
/* A container ematch groups sub-matches and has no ops of its own. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Non-zero if the match payload is stored inline (TCF_EM_SIMPLE). */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Non-zero if the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* Non-zero if this is the last ematch in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
393
394static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
395{
396 if (tcf_em_last_match(em))
397 return 1;
398
399 if (result == 0 && em->flags & TCF_EM_REL_AND)
400 return 1;
401
402 if (result != 0 && em->flags & TCF_EM_REL_OR)
403 return 1;
404
405 return 0;
406}
407
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destroyage (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
444
5c15257f
JP
445int tcf_em_register(struct tcf_ematch_ops *);
446void tcf_em_unregister(struct tcf_ematch_ops *);
447int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
448 struct tcf_ematch_tree *);
82a470f1 449void tcf_em_tree_destroy(struct tcf_ematch_tree *);
5c15257f
JP
450int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
451int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
452 struct tcf_pkt_info *);
1da177e4 453
1da177e4
LT
454/**
455 * tcf_em_tree_match - evaulate an ematch tree
456 *
457 * @skb: socket buffer of the packet in question
458 * @tree: ematch tree to be used for evaluation
459 * @info: packet information examined by classifier
460 *
461 * This function matches @skb against the ematch tree in @tree by going
462 * through all ematches respecting their logic relations returning
463 * as soon as the result is obvious.
464 *
465 * Returns 1 if the ematch tree as-one matches, no ematches are configured
466 * or ematch is not enabled in the kernel, otherwise 0 is returned.
467 */
468static inline int tcf_em_tree_match(struct sk_buff *skb,
469 struct tcf_ematch_tree *tree,
470 struct tcf_pkt_info *info)
471{
472 if (tree->hdr.nmatches)
473 return __tcf_em_tree_match(skb, tree, info);
474 else
475 return 1;
476}
477
db3d99c0
PM
478#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
479
1da177e4
LT
480#else /* CONFIG_NET_EMATCH */
481
/* !CONFIG_NET_EMATCH: empty tree type and no-op macro stand-ins.
 * validate/dump "succeed" (0), match always reports a hit (1), and the
 * (void) casts keep arguments referenced to silence unused warnings.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
489
490#endif /* CONFIG_NET_EMATCH */
491
/* Return a pointer to the requested protocol layer inside @skb's data,
 * or NULL for an unknown layer id.
 */
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}
505
eddc9ec5
ACM
/* Check that [ptr, ptr + len) lies entirely inside @skb's linear data.
 * The final (ptr <= ptr + len) term guards against pointer-arithmetic
 * wraparound on huge @len values — intentional, do not "simplify" away.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
513
1da177e4 514static inline int
1057c55f
AA
515tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
516 struct netlink_ext_ack *extack)
1da177e4 517{
2519a602
WC
518 char indev[IFNAMSIZ];
519 struct net_device *dev;
520
1057c55f
AA
521 if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
522 NL_SET_ERR_MSG(extack, "Interface name too long");
1da177e4 523 return -EINVAL;
1057c55f 524 }
2519a602
WC
525 dev = __dev_get_by_name(net, indev);
526 if (!dev)
527 return -ENODEV;
528 return dev->ifindex;
1da177e4
LT
529}
530
2519a602
WC
531static inline bool
532tcf_match_indev(struct sk_buff *skb, int ifindex)
1da177e4 533{
2519a602
WC
534 if (!ifindex)
535 return true;
536 if (!skb->skb_iif)
537 return false;
538 return ifindex == skb->skb_iif;
1da177e4 539}
1da177e4 540
3a7b6861
PNA
541int tc_setup_flow_action(struct flow_action *flow_action,
542 const struct tcf_exts *exts);
aeb3fecd
CW
543int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
544 void *type_data, bool err_stop);
e3ab786b 545unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
717503b9 546
a1b7c5fd
JF
547struct tc_cls_u32_knode {
548 struct tcf_exts *exts;
068ceb35 549 struct tcf_result *res;
e014860e 550 struct tc_u32_sel *sel;
a1b7c5fd
JF
551 u32 handle;
552 u32 val;
553 u32 mask;
554 u32 link_handle;
e014860e 555 u8 fshift;
a1b7c5fd
JF
556};
557
558struct tc_cls_u32_hnode {
559 u32 handle;
560 u32 prio;
561 unsigned int divisor;
562};
563
564enum tc_clsu32_command {
565 TC_CLSU32_NEW_KNODE,
566 TC_CLSU32_REPLACE_KNODE,
567 TC_CLSU32_DELETE_KNODE,
568 TC_CLSU32_NEW_HNODE,
569 TC_CLSU32_REPLACE_HNODE,
570 TC_CLSU32_DELETE_HNODE,
571};
572
573struct tc_cls_u32_offload {
f9e30088 574 struct flow_cls_common_offload common;
a1b7c5fd
JF
575 /* knode values */
576 enum tc_clsu32_command command;
577 union {
578 struct tc_cls_u32_knode knode;
579 struct tc_cls_u32_hnode hnode;
580 };
581};
582
7b06e8ae 583static inline bool tc_can_offload(const struct net_device *dev)
6843e7a2 584{
70b5aee4 585 return dev->features & NETIF_F_HW_TC;
6843e7a2
JF
586}
587
f9eda14f
QM
588static inline bool tc_can_offload_extack(const struct net_device *dev,
589 struct netlink_ext_ack *extack)
590{
591 bool can = tc_can_offload(dev);
592
593 if (!can)
594 NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
595
596 return can;
597}
598
878db9f0
JK
/* Combined offload gate for drivers that only support chain 0: the
 * device must be offload-capable and the request must target chain 0.
 * On failure an explanatory message is set on the request's extack.
 */
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
612
55330f05
HHZ
613static inline bool tc_skip_hw(u32 flags)
614{
615 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
616}
617
d34e3e18
SS
618static inline bool tc_skip_sw(u32 flags)
619{
620 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
621}
622
623/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
624static inline bool tc_flags_valid(u32 flags)
625{
81c7288b
MRL
626 if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
627 TCA_CLS_FLAGS_VERBOSE))
d34e3e18
SS
628 return false;
629
81c7288b 630 flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
d34e3e18
SS
631 if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
632 return false;
633
634 return true;
635}
636
e696028a
OG
637static inline bool tc_in_hw(u32 flags)
638{
639 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
640}
641
/* Fill the common part of a classifier offload request from @tp.
 * extack is only forwarded when the caller will care about hardware
 * errors: skip_sw (hw is mandatory) or explicit VERBOSE.
 */
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
653
b87f7936
YG
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

/* Driver-facing command block for cls_matchall hardware offload. */
struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

/* Driver-facing command block for cls_bpf hardware offload;
 * @oldprog carries the program being replaced, if any.
 */
struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

/* mqprio qdisc offload parameters. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};
602f3baf 701
f34b4aac
NF
/* Common stats destination shared by several qdisc offload requests. */
struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

/* mq qdisc offload request; union member selected by @command. */
struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};
727
602f3baf
NF
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

/* RED qdisc offload parameters (REPLACE). */
struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

/* RED qdisc offload request; union member selected by @command. */
struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

/* Per-virtual-queue GRED offload parameters; @present marks a
 * configured DP slot.
 */
struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

/* GRED qdisc offload parameters (REPLACE). */
struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

/* Per-DP stats destination for TC_GRED_STATS. */
struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

/* GRED qdisc offload request; union member selected by @command. */
struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};
801
7fdb61b4
NF
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadedable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

/* prio qdisc offload request; union member selected by @command. */
struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

/* Notification that a new root qdisc was grafted on a device. */
struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};
844
1da177e4 845#endif