net: sched: Pass root lock to Qdisc_ops.enqueue
[linux-block.git] / include / net / pkt_cls.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef __NET_PKT_CLS_H
3#define __NET_PKT_CLS_H
4
5#include <linux/pkt_cls.h>
7aa0045d 6#include <linux/workqueue.h>
1da177e4
LT
7#include <net/sch_generic.h>
8#include <net/act_api.h>
a5148626 9#include <net/net_namespace.h>
1da177e4 10
cd11b164 11/* TC action not accessible from user space */
720f22fe 12#define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
cd11b164 13
1da177e4
LT
14/* Basic packet classifier frontend definitions. */
15
fd2c3ef7 16struct tcf_walker {
1da177e4
LT
17 int stop;
18 int skip;
19 int count;
6676d5e4 20 bool nonempty;
01683a14 21 unsigned long cookie;
8113c095 22 int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
1da177e4
LT
23};
24
5c15257f
JP
25int register_tcf_proto_ops(struct tcf_proto_ops *ops);
26int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
1da177e4 27
8c4083b3 28struct tcf_block_ext_info {
32f8c409 29 enum flow_block_binder_type binder_type;
c7eb7d72
JP
30 tcf_chain_head_change_t *chain_head_change;
31 void *chain_head_change_priv;
48617387 32 u32 block_index;
8c4083b3
JP
33};
34
acb67442 35struct tcf_block_cb;
aaa908ff 36bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
acb67442 37
8ae70032 38#ifdef CONFIG_NET_CLS
1f3ed383
JP
39struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
40 u32 chain_index);
1f3ed383 41void tcf_chain_put_by_act(struct tcf_chain *chain);
bbf73830
VB
42struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
43 struct tcf_chain *chain);
fe2923af 44struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
12db03b6 45 struct tcf_proto *tp, bool rtnl_held);
f36fe1c4 46void tcf_block_netif_keep_dst(struct tcf_block *block);
6529eaba 47int tcf_block_get(struct tcf_block **p_block,
8d1a77f9
AA
48 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
49 struct netlink_ext_ack *extack);
c7eb7d72 50int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
8d1a77f9
AA
51 struct tcf_block_ext_info *ei,
52 struct netlink_ext_ack *extack);
6529eaba 53void tcf_block_put(struct tcf_block *block);
c7eb7d72 54void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
8c4083b3 55 struct tcf_block_ext_info *ei);
44186460 56
48617387
JP
57static inline bool tcf_block_shared(struct tcf_block *block)
58{
59 return block->index;
60}
61
c1a970d0
VB
62static inline bool tcf_block_non_null_shared(struct tcf_block *block)
63{
64 return block && block->index;
65}
66
44186460
JP
67static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
68{
48617387 69 WARN_ON(tcf_block_shared(block));
44186460
JP
70 return block->q;
71}
72
87d83093
JP
73int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
74 struct tcf_result *res, bool compat_mode);
7d17c544
PB
75int tcf_classify_ingress(struct sk_buff *skb,
76 const struct tcf_block *ingress_block,
77 const struct tcf_proto *tp, struct tcf_result *res,
78 bool compat_mode);
87d83093 79
8ae70032 80#else
88c44a52
PJV
81static inline bool tcf_block_shared(struct tcf_block *block)
82{
83 return false;
84}
85
c1a970d0
VB
86static inline bool tcf_block_non_null_shared(struct tcf_block *block)
87{
88 return false;
89}
90
6529eaba
JP
91static inline
92int tcf_block_get(struct tcf_block **p_block,
3c149091
SM
93 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
94 struct netlink_ext_ack *extack)
6529eaba
JP
95{
96 return 0;
97}
98
8c4083b3 99static inline
c7eb7d72 100int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
33c30a8b
QM
101 struct tcf_block_ext_info *ei,
102 struct netlink_ext_ack *extack)
8c4083b3
JP
103{
104 return 0;
105}
106
6529eaba 107static inline void tcf_block_put(struct tcf_block *block)
8ae70032
JP
108{
109}
87d83093 110
8c4083b3 111static inline
c7eb7d72 112void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
8c4083b3
JP
113 struct tcf_block_ext_info *ei)
114{
115}
116
44186460
JP
117static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
118{
119 return NULL;
120}
121
acb67442 122static inline
a7323311 123int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
acb67442
JP
124 void *cb_priv)
125{
126 return 0;
127}
128
129static inline
a7323311 130void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
acb67442
JP
131 void *cb_priv)
132{
133}
134
87d83093
JP
135static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
136 struct tcf_result *res, bool compat_mode)
137{
138 return TC_ACT_UNSPEC;
139}
9410c940
PB
140
141static inline int tcf_classify_ingress(struct sk_buff *skb,
7d17c544 142 const struct tcf_block *ingress_block,
9410c940
PB
143 const struct tcf_proto *tp,
144 struct tcf_result *res, bool compat_mode)
145{
146 return TC_ACT_UNSPEC;
147}
148
8ae70032 149#endif
cf1facda 150
1da177e4
LT
151static inline unsigned long
152__cls_set_class(unsigned long *clp, unsigned long cl)
153{
a0efb80c 154 return xchg(clp, cl);
1da177e4
LT
155}
156
2e24cd75
CW
157static inline void
158__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
1da177e4 159{
2e24cd75 160 unsigned long cl;
34e3759c 161
2e24cd75
CW
162 cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
163 cl = __cls_set_class(&r->class, cl);
164 if (cl)
165 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
166}
167
168static inline void
169tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
170{
34e3759c 171 struct Qdisc *q = tp->chain->block->q;
1da177e4 172
34e3759c
JP
173 /* Check q as it is not set for shared blocks. In that case,
174 * setting class is not supported.
175 */
176 if (!q)
177 return;
2e24cd75
CW
178 sch_tree_lock(q);
179 __tcf_bind_filter(q, r, base);
180 sch_tree_unlock(q);
181}
182
183static inline void
184__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
185{
186 unsigned long cl;
187
188 if ((cl = __cls_set_class(&r->class, 0)) != 0)
34e3759c 189 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
190}
191
192static inline void
193tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
194{
34e3759c 195 struct Qdisc *q = tp->chain->block->q;
1da177e4 196
34e3759c
JP
197 if (!q)
198 return;
2e24cd75 199 __tcf_unbind_filter(q, r);
1da177e4
LT
200}
201
/* Container for the actions ("extensions") attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
215
14215108
CW
216static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
217 int action, int police)
33be6271
WC
218{
219#ifdef CONFIG_NET_CLS_ACT
5da57f42 220 exts->type = 0;
22dc13c8 221 exts->nr_actions = 0;
14215108 222 exts->net = net;
22dc13c8
WC
223 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
224 GFP_KERNEL);
b9a24bb7
WC
225 if (!exts->actions)
226 return -ENOMEM;
33be6271 227#endif
5da57f42
WC
228 exts->action = action;
229 exts->police = police;
b9a24bb7 230 return 0;
33be6271
WC
231}
232
e4b95c41
CW
233/* Return false if the netns is being destroyed in cleanup_net(). Callers
234 * need to do cleanup synchronously in this case, otherwise may race with
235 * tc_action_net_exit(). Return true for other cases.
236 */
237static inline bool tcf_exts_get_net(struct tcf_exts *exts)
238{
239#ifdef CONFIG_NET_CLS_ACT
240 exts->net = maybe_get_net(exts->net);
241 return exts->net != NULL;
242#else
243 return true;
244#endif
245}
246
247static inline void tcf_exts_put_net(struct tcf_exts *exts)
248{
249#ifdef CONFIG_NET_CLS_ACT
250 if (exts->net)
251 put_net(exts->net);
252#endif
253}
254
22dc13c8 255#ifdef CONFIG_NET_CLS_ACT
244cd96a
CW
256#define tcf_exts_for_each_action(i, a, exts) \
257 for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
258#else
259#define tcf_exts_for_each_action(i, a, exts) \
191672ca 260 for (; 0; (void)(i), (void)(a), (void)(exts))
22dc13c8 261#endif
22dc13c8 262
d897a638
JK
263static inline void
264tcf_exts_stats_update(const struct tcf_exts *exts,
4b61d3e8 265 u64 bytes, u64 packets, u64 drops, u64 lastuse,
93a129eb 266 u8 used_hw_stats, bool used_hw_stats_valid)
d897a638
JK
267{
268#ifdef CONFIG_NET_CLS_ACT
269 int i;
270
271 preempt_disable();
272
273 for (i = 0; i < exts->nr_actions; i++) {
274 struct tc_action *a = exts->actions[i];
275
4b61d3e8
PL
276 tcf_action_stats_update(a, bytes, packets, drops,
277 lastuse, true);
93a129eb
JP
278 a->used_hw_stats = used_hw_stats;
279 a->used_hw_stats_valid = used_hw_stats_valid;
d897a638
JK
280 }
281
282 preempt_enable();
283#endif
284}
285
3bcc0cec
JP
286/**
287 * tcf_exts_has_actions - check if at least one action is present
288 * @exts: tc filter extensions handle
289 *
290 * Returns true if at least one action is present.
291 */
292static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
293{
2734437e 294#ifdef CONFIG_NET_CLS_ACT
3bcc0cec
JP
295 return exts->nr_actions;
296#else
297 return false;
298#endif
299}
2734437e 300
af69afc5
JP
301/**
302 * tcf_exts_exec - execute tc filter extensions
303 * @skb: socket buffer
304 * @exts: tc filter extensions handle
305 * @res: desired result
306 *
af089e70 307 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
af69afc5
JP
308 * a negative number if the filter must be considered unmatched or
309 * a positive action code (TC_ACT_*) which must be returned to the
310 * underlying layer.
311 */
312static inline int
313tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
314 struct tcf_result *res)
315{
316#ifdef CONFIG_NET_CLS_ACT
ec1a9cca 317 return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
af69afc5 318#endif
af089e70 319 return TC_ACT_OK;
af69afc5
JP
320}
321
5c15257f
JP
322int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
323 struct nlattr **tb, struct nlattr *rate_tlv,
ec6743a1 324 struct tcf_exts *exts, bool ovr, bool rtnl_held,
50a56190 325 struct netlink_ext_ack *extack);
18d0264f 326void tcf_exts_destroy(struct tcf_exts *exts);
9b0d4446 327void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
5da57f42 328int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
ca44b738 329int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
5da57f42 330int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
1da177e4
LT
331
/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *	ptr;
	int		nexthdr;
};
339
340#ifdef CONFIG_NET_EMATCH
341
342struct tcf_ematch_ops;
343
344/**
345 * struct tcf_ematch - extended match (ematch)
346 *
347 * @matchid: identifier to allow userspace to reidentify a match
348 * @flags: flags specifying attributes and the relation to other matches
349 * @ops: the operations lookup table of the corresponding ematch module
350 * @datalen: length of the ematch specific configuration data
351 * @data: ematch specific data
352 */
fd2c3ef7 353struct tcf_ematch {
1da177e4
LT
354 struct tcf_ematch_ops * ops;
355 unsigned long data;
356 unsigned int datalen;
357 u16 matchid;
358 u16 flags;
82a470f1 359 struct net *net;
1da177e4
LT
360};
361
362static inline int tcf_em_is_container(struct tcf_ematch *em)
363{
364 return !em->ops;
365}
366
367static inline int tcf_em_is_simple(struct tcf_ematch *em)
368{
369 return em->flags & TCF_EM_SIMPLE;
370}
371
372static inline int tcf_em_is_inverted(struct tcf_ematch *em)
373{
374 return em->flags & TCF_EM_INVERT;
375}
376
377static inline int tcf_em_last_match(struct tcf_ematch *em)
378{
379 return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
380}
381
382static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
383{
384 if (tcf_em_last_match(em))
385 return 1;
386
387 if (result == 0 && em->flags & TCF_EM_REL_AND)
388 return 1;
389
390 if (result != 0 && em->flags & TCF_EM_REL_OR)
391 return 1;
392
393 return 0;
394}
395
396/**
397 * struct tcf_ematch_tree - ematch tree handle
398 *
399 * @hdr: ematch tree header supplied by userspace
400 * @matches: array of ematches
401 */
fd2c3ef7 402struct tcf_ematch_tree {
1da177e4
LT
403 struct tcf_ematch_tree_hdr hdr;
404 struct tcf_ematch * matches;
405
406};
407
408/**
409 * struct tcf_ematch_ops - ematch module operations
410 *
411 * @kind: identifier (kind) of this ematch module
412 * @datalen: length of expected configuration data (optional)
413 * @change: called during validation (optional)
414 * @match: called during ematch tree evaluation, must return 1/0
415 * @destroy: called during destroyage (optional)
416 * @dump: called during dumping process (optional)
417 * @owner: owner, must be set to THIS_MODULE
418 * @link: link to previous/next ematch module (internal use)
419 */
fd2c3ef7 420struct tcf_ematch_ops {
1da177e4
LT
421 int kind;
422 int datalen;
82a470f1 423 int (*change)(struct net *net, void *,
1da177e4
LT
424 int, struct tcf_ematch *);
425 int (*match)(struct sk_buff *, struct tcf_ematch *,
426 struct tcf_pkt_info *);
82a470f1 427 void (*destroy)(struct tcf_ematch *);
1da177e4
LT
428 int (*dump)(struct sk_buff *, struct tcf_ematch *);
429 struct module *owner;
430 struct list_head link;
431};
432
5c15257f
JP
433int tcf_em_register(struct tcf_ematch_ops *);
434void tcf_em_unregister(struct tcf_ematch_ops *);
435int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
436 struct tcf_ematch_tree *);
82a470f1 437void tcf_em_tree_destroy(struct tcf_ematch_tree *);
5c15257f
JP
438int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
439int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
440 struct tcf_pkt_info *);
1da177e4 441
1da177e4
LT
442/**
443 * tcf_em_tree_match - evaulate an ematch tree
444 *
445 * @skb: socket buffer of the packet in question
446 * @tree: ematch tree to be used for evaluation
447 * @info: packet information examined by classifier
448 *
449 * This function matches @skb against the ematch tree in @tree by going
450 * through all ematches respecting their logic relations returning
451 * as soon as the result is obvious.
452 *
453 * Returns 1 if the ematch tree as-one matches, no ematches are configured
454 * or ematch is not enabled in the kernel, otherwise 0 is returned.
455 */
456static inline int tcf_em_tree_match(struct sk_buff *skb,
457 struct tcf_ematch_tree *tree,
458 struct tcf_pkt_info *info)
459{
460 if (tree->hdr.nmatches)
461 return __tcf_em_tree_match(skb, tree, info);
462 else
463 return 1;
464}
465
db3d99c0
PM
466#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
467
1da177e4
LT
468#else /* CONFIG_NET_EMATCH */
469
fd2c3ef7 470struct tcf_ematch_tree {
1da177e4
LT
471};
472
473#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
82a470f1 474#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
1da177e4 475#define tcf_em_tree_dump(skb, t, tlv) (0)
1da177e4
LT
476#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
477
478#endif /* CONFIG_NET_EMATCH */
479
480static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
481{
482 switch (layer) {
483 case TCF_LAYER_LINK:
d3303a65 484 return skb_mac_header(skb);
1da177e4 485 case TCF_LAYER_NETWORK:
d56f90a7 486 return skb_network_header(skb);
1da177e4 487 case TCF_LAYER_TRANSPORT:
9c70220b 488 return skb_transport_header(skb);
1da177e4
LT
489 }
490
491 return NULL;
492}
493
eddc9ec5
ACM
494static inline int tcf_valid_offset(const struct sk_buff *skb,
495 const unsigned char *ptr, const int len)
1da177e4 496{
da521b2c
DM
497 return likely((ptr + len) <= skb_tail_pointer(skb) &&
498 ptr >= skb->head &&
499 (ptr <= (ptr + len)));
1da177e4
LT
500}
501
1da177e4 502static inline int
1057c55f
AA
503tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
504 struct netlink_ext_ack *extack)
1da177e4 505{
2519a602
WC
506 char indev[IFNAMSIZ];
507 struct net_device *dev;
508
1057c55f 509 if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
e4a58ef3
GN
510 NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
511 "Interface name too long");
1da177e4 512 return -EINVAL;
1057c55f 513 }
2519a602 514 dev = __dev_get_by_name(net, indev);
e4a58ef3
GN
515 if (!dev) {
516 NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
517 "Network device not found");
2519a602 518 return -ENODEV;
e4a58ef3 519 }
2519a602 520 return dev->ifindex;
1da177e4
LT
521}
522
2519a602
WC
523static inline bool
524tcf_match_indev(struct sk_buff *skb, int ifindex)
1da177e4 525{
2519a602
WC
526 if (!ifindex)
527 return true;
528 if (!skb->skb_iif)
529 return false;
530 return ifindex == skb->skb_iif;
1da177e4 531}
1da177e4 532
3a7b6861 533int tc_setup_flow_action(struct flow_action *flow_action,
b15e7a6e 534 const struct tcf_exts *exts);
5a6ff4b1
VB
535void tc_cleanup_flow_action(struct flow_action *flow_action);
536
aeb3fecd 537int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
40119211
VB
538 void *type_data, bool err_stop, bool rtnl_held);
539int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
540 enum tc_setup_type type, void *type_data, bool err_stop,
541 u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
542int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
543 enum tc_setup_type type, void *type_data, bool err_stop,
544 u32 *old_flags, unsigned int *old_in_hw_count,
545 u32 *new_flags, unsigned int *new_in_hw_count,
546 bool rtnl_held);
547int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
548 enum tc_setup_type type, void *type_data, bool err_stop,
549 u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
550int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
551 bool add, flow_setup_cb_t *cb,
552 enum tc_setup_type type, void *type_data,
553 void *cb_priv, u32 *flags, unsigned int *in_hw_count);
e3ab786b 554unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
717503b9 555
a1b7c5fd
JF
556struct tc_cls_u32_knode {
557 struct tcf_exts *exts;
068ceb35 558 struct tcf_result *res;
e014860e 559 struct tc_u32_sel *sel;
a1b7c5fd
JF
560 u32 handle;
561 u32 val;
562 u32 mask;
563 u32 link_handle;
e014860e 564 u8 fshift;
a1b7c5fd
JF
565};
566
567struct tc_cls_u32_hnode {
568 u32 handle;
569 u32 prio;
570 unsigned int divisor;
571};
572
573enum tc_clsu32_command {
574 TC_CLSU32_NEW_KNODE,
575 TC_CLSU32_REPLACE_KNODE,
576 TC_CLSU32_DELETE_KNODE,
577 TC_CLSU32_NEW_HNODE,
578 TC_CLSU32_REPLACE_HNODE,
579 TC_CLSU32_DELETE_HNODE,
580};
581
582struct tc_cls_u32_offload {
f9e30088 583 struct flow_cls_common_offload common;
a1b7c5fd
JF
584 /* knode values */
585 enum tc_clsu32_command command;
586 union {
587 struct tc_cls_u32_knode knode;
588 struct tc_cls_u32_hnode hnode;
589 };
590};
591
7b06e8ae 592static inline bool tc_can_offload(const struct net_device *dev)
6843e7a2 593{
70b5aee4 594 return dev->features & NETIF_F_HW_TC;
6843e7a2
JF
595}
596
f9eda14f
QM
597static inline bool tc_can_offload_extack(const struct net_device *dev,
598 struct netlink_ext_ack *extack)
599{
600 bool can = tc_can_offload(dev);
601
602 if (!can)
603 NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
604
605 return can;
606}
607
878db9f0
JK
608static inline bool
609tc_cls_can_offload_and_chain0(const struct net_device *dev,
f9e30088 610 struct flow_cls_common_offload *common)
878db9f0
JK
611{
612 if (!tc_can_offload_extack(dev, common->extack))
613 return false;
614 if (common->chain_index) {
615 NL_SET_ERR_MSG(common->extack,
616 "Driver supports only offload of chain 0");
617 return false;
618 }
619 return true;
620}
621
55330f05
HHZ
622static inline bool tc_skip_hw(u32 flags)
623{
624 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
625}
626
d34e3e18
SS
627static inline bool tc_skip_sw(u32 flags)
628{
629 return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
630}
631
632/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
633static inline bool tc_flags_valid(u32 flags)
634{
81c7288b
MRL
635 if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
636 TCA_CLS_FLAGS_VERBOSE))
d34e3e18
SS
637 return false;
638
81c7288b 639 flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
d34e3e18
SS
640 if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
641 return false;
642
643 return true;
644}
645
e696028a
OG
646static inline bool tc_in_hw(u32 flags)
647{
648 return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
649}
650
34832e1c 651static inline void
f9e30088 652tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
34832e1c
JK
653 const struct tcf_proto *tp, u32 flags,
654 struct netlink_ext_ack *extack)
655{
656 cls_common->chain_index = tp->chain->index;
657 cls_common->protocol = tp->protocol;
ef01adae 658 cls_common->prio = tp->prio >> 16;
81c7288b 659 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
34832e1c
JK
660 cls_common->extack = extack;
661}
662
b87f7936
YG
663enum tc_matchall_command {
664 TC_CLSMATCHALL_REPLACE,
665 TC_CLSMATCHALL_DESTROY,
b7fe4ab8 666 TC_CLSMATCHALL_STATS,
b87f7936
YG
667};
668
669struct tc_cls_matchall_offload {
f9e30088 670 struct flow_cls_common_offload common;
b87f7936 671 enum tc_matchall_command command;
f00cbf19 672 struct flow_rule *rule;
b7fe4ab8 673 struct flow_stats stats;
b87f7936
YG
674 unsigned long cookie;
675};
676
332ae8e2 677enum tc_clsbpf_command {
102740bd 678 TC_CLSBPF_OFFLOAD,
68d64063 679 TC_CLSBPF_STATS,
332ae8e2
JK
680};
681
682struct tc_cls_bpf_offload {
f9e30088 683 struct flow_cls_common_offload common;
332ae8e2
JK
684 enum tc_clsbpf_command command;
685 struct tcf_exts *exts;
686 struct bpf_prog *prog;
102740bd 687 struct bpf_prog *oldprog;
332ae8e2
JK
688 const char *name;
689 bool exts_integrated;
690};
691
4e8b86c0
AN
692struct tc_mqprio_qopt_offload {
693 /* struct tc_mqprio_qopt must always be the first element */
694 struct tc_mqprio_qopt qopt;
695 u16 mode;
696 u16 shaper;
697 u32 flags;
698 u64 min_rate[TC_QOPT_MAX_QUEUE];
699 u64 max_rate[TC_QOPT_MAX_QUEUE];
700};
1045ba77
JHS
701
702/* This structure holds cookie structure that is passed from user
703 * to the kernel for actions and classifiers
704 */
705struct tc_cookie {
706 u8 *data;
707 u32 len;
eec94fdb 708 struct rcu_head rcu;
1045ba77 709};
602f3baf 710
f34b4aac
NF
711struct tc_qopt_offload_stats {
712 struct gnet_stats_basic_packed *bstats;
713 struct gnet_stats_queue *qstats;
714};
715
f971b132
JK
716enum tc_mq_command {
717 TC_MQ_CREATE,
718 TC_MQ_DESTROY,
47c669a4 719 TC_MQ_STATS,
d577a3d2
JK
720 TC_MQ_GRAFT,
721};
722
723struct tc_mq_opt_offload_graft_params {
724 unsigned long queue;
725 u32 child_handle;
f971b132
JK
726};
727
728struct tc_mq_qopt_offload {
729 enum tc_mq_command command;
730 u32 handle;
d577a3d2
JK
731 union {
732 struct tc_qopt_offload_stats stats;
733 struct tc_mq_opt_offload_graft_params graft_params;
734 };
f971b132
JK
735};
736
602f3baf
NF
737enum tc_red_command {
738 TC_RED_REPLACE,
739 TC_RED_DESTROY,
740 TC_RED_STATS,
741 TC_RED_XSTATS,
bf2a752b 742 TC_RED_GRAFT,
602f3baf
NF
743};
744
745struct tc_red_qopt_offload_params {
746 u32 min;
747 u32 max;
748 u32 probability;
c0b7490b 749 u32 limit;
602f3baf 750 bool is_ecn;
190852a5 751 bool is_harddrop;
0a7fad23 752 bool is_nodrop;
416ef9b1 753 struct gnet_stats_queue *qstats;
602f3baf 754};
602f3baf
NF
755
756struct tc_red_qopt_offload {
757 enum tc_red_command command;
758 u32 handle;
759 u32 parent;
760 union {
761 struct tc_red_qopt_offload_params set;
f34b4aac 762 struct tc_qopt_offload_stats stats;
602f3baf 763 struct red_stats *xstats;
bf2a752b 764 u32 child_handle;
602f3baf
NF
765 };
766};
767
890d8d23
JK
768enum tc_gred_command {
769 TC_GRED_REPLACE,
770 TC_GRED_DESTROY,
e49efd52 771 TC_GRED_STATS,
890d8d23
JK
772};
773
774struct tc_gred_vq_qopt_offload_params {
775 bool present;
776 u32 limit;
777 u32 prio;
778 u32 min;
779 u32 max;
780 bool is_ecn;
781 bool is_harddrop;
782 u32 probability;
783 /* Only need backlog, see struct tc_prio_qopt_offload_params */
784 u32 *backlog;
785};
786
787struct tc_gred_qopt_offload_params {
788 bool grio_on;
789 bool wred_on;
790 unsigned int dp_cnt;
791 unsigned int dp_def;
792 struct gnet_stats_queue *qstats;
793 struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
794};
795
e49efd52
JK
796struct tc_gred_qopt_offload_stats {
797 struct gnet_stats_basic_packed bstats[MAX_DPs];
798 struct gnet_stats_queue qstats[MAX_DPs];
799 struct red_stats *xstats[MAX_DPs];
800};
801
890d8d23
JK
802struct tc_gred_qopt_offload {
803 enum tc_gred_command command;
804 u32 handle;
805 u32 parent;
806 union {
807 struct tc_gred_qopt_offload_params set;
e49efd52 808 struct tc_gred_qopt_offload_stats stats;
890d8d23
JK
809 };
810};
811
7fdb61b4
NF
812enum tc_prio_command {
813 TC_PRIO_REPLACE,
814 TC_PRIO_DESTROY,
815 TC_PRIO_STATS,
b9c7a7ac 816 TC_PRIO_GRAFT,
7fdb61b4
NF
817};
818
819struct tc_prio_qopt_offload_params {
820 int bands;
821 u8 priomap[TC_PRIO_MAX + 1];
9586a992
PM
822 /* At the point of un-offloading the Qdisc, the reported backlog and
823 * qlen need to be reduced by the portion that is in HW.
7fdb61b4
NF
824 */
825 struct gnet_stats_queue *qstats;
826};
827
b9c7a7ac
NF
828struct tc_prio_qopt_offload_graft_params {
829 u8 band;
830 u32 child_handle;
831};
832
7fdb61b4
NF
833struct tc_prio_qopt_offload {
834 enum tc_prio_command command;
835 u32 handle;
836 u32 parent;
837 union {
838 struct tc_prio_qopt_offload_params replace_params;
839 struct tc_qopt_offload_stats stats;
b9c7a7ac 840 struct tc_prio_qopt_offload_graft_params graft_params;
7fdb61b4
NF
841 };
842};
b9c7a7ac 843
98b0e5f6
JK
844enum tc_root_command {
845 TC_ROOT_GRAFT,
846};
847
848struct tc_root_qopt_offload {
849 enum tc_root_command command;
850 u32 handle;
851 bool ingress;
852};
853
d35eb52b
PM
854enum tc_ets_command {
855 TC_ETS_REPLACE,
856 TC_ETS_DESTROY,
857 TC_ETS_STATS,
858 TC_ETS_GRAFT,
859};
860
861struct tc_ets_qopt_offload_replace_params {
862 unsigned int bands;
863 u8 priomap[TC_PRIO_MAX + 1];
864 unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
865 unsigned int weights[TCQ_ETS_MAX_BANDS];
866 struct gnet_stats_queue *qstats;
867};
868
869struct tc_ets_qopt_offload_graft_params {
870 u8 band;
871 u32 child_handle;
872};
873
874struct tc_ets_qopt_offload {
875 enum tc_ets_command command;
876 u32 handle;
877 u32 parent;
878 union {
879 struct tc_ets_qopt_offload_replace_params replace_params;
880 struct tc_qopt_offload_stats stats;
881 struct tc_ets_qopt_offload_graft_params graft_params;
882 };
883};
884
ef6aadcc
PM
885enum tc_tbf_command {
886 TC_TBF_REPLACE,
887 TC_TBF_DESTROY,
888 TC_TBF_STATS,
889};
890
891struct tc_tbf_qopt_offload_replace_params {
892 struct psched_ratecfg rate;
893 u32 max_size;
894 struct gnet_stats_queue *qstats;
895};
896
897struct tc_tbf_qopt_offload {
898 enum tc_tbf_command command;
899 u32 handle;
900 u32 parent;
901 union {
902 struct tc_tbf_qopt_offload_replace_params replace_params;
903 struct tc_qopt_offload_stats stats;
904 };
905};
906
aaca9408
PM
907enum tc_fifo_command {
908 TC_FIFO_REPLACE,
909 TC_FIFO_DESTROY,
910 TC_FIFO_STATS,
911};
912
913struct tc_fifo_qopt_offload {
914 enum tc_fifo_command command;
915 u32 handle;
916 u32 parent;
917 union {
918 struct tc_qopt_offload_stats stats;
919 };
920};
921
1da177e4 922#endif