net: sched: use extended variants of block_get/put in ingress and clsact qdiscs
[linux-block.git] / include / net / pkt_cls.h
CommitLineData
1da177e4
LT
1#ifndef __NET_PKT_CLS_H
2#define __NET_PKT_CLS_H
3
4#include <linux/pkt_cls.h>
5#include <net/sch_generic.h>
6#include <net/act_api.h>
7
8/* Basic packet classifier frontend definitions. */
9
/* Iteration state for walking every filter of a classifier.
 * @fn is invoked once per node; it may set @stop to abort the walk.
 * @skip nodes are passed over before @count starts being incremented.
 */
struct tcf_walker {
	int stop;
	int skip;
	int count;
	int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
16
5c15257f
JP
17int register_tcf_proto_ops(struct tcf_proto_ops *ops);
18int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
1da177e4 19
8c4083b3
JP
/* Identifies where a tcf_block is bound inside its qdisc. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extra binding information for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
};
29
8ae70032 30#ifdef CONFIG_NET_CLS
367a8ce8
WC
31struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
32 bool create);
5bc17018 33void tcf_chain_put(struct tcf_chain *chain);
6529eaba 34int tcf_block_get(struct tcf_block **p_block,
69d78ef2 35 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
8c4083b3
JP
36int tcf_block_get_ext(struct tcf_block **p_block,
37 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
38 struct tcf_block_ext_info *ei);
6529eaba 39void tcf_block_put(struct tcf_block *block);
8c4083b3
JP
40void tcf_block_put_ext(struct tcf_block *block,
41 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
42 struct tcf_block_ext_info *ei);
44186460
JP
43
/* Qdisc the block is attached to; may be NULL for shared blocks
 * (see the check in tcf_bind_filter()).
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}
48
49static inline struct net_device *tcf_block_dev(struct tcf_block *block)
50{
51 return tcf_block_q(block)->dev_queue->dev;
52}
53
87d83093
JP
54int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
55 struct tcf_result *res, bool compat_mode);
56
8ae70032 57#else
6529eaba
JP
/* !CONFIG_NET_CLS stubs: classification is compiled out, so block
 * setup/teardown succeeds trivially and classification never matches.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

/* TC_ACT_UNSPEC tells the caller to continue as if no filter matched. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
8ae70032 99#endif
cf1facda 100
1da177e4
LT
/* Atomically install @cl as the bound class and return the previous
 * value.  Lockless; callers needing qdisc serialization use
 * cls_set_class() instead.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
106
/* Exchange the bound class under the qdisc tree lock, serializing
 * against concurrent qdisc/class changes.  Returns the old class.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}
117
/* Bind filter result @r to the class identified by r->classid via the
 * qdisc's class ops, releasing the reference on any previously bound
 * class.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
134
135static inline void
136tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
137{
34e3759c 138 struct Qdisc *q = tp->chain->block->q;
1da177e4
LT
139 unsigned long cl;
140
34e3759c
JP
141 if (!q)
142 return;
1da177e4 143 if ((cl = __cls_set_class(&r->class, 0)) != 0)
34e3759c 144 q->ops->cl_ops->unbind_tcf(q, cl);
1da177e4
LT
145}
146
/* Actions and extension TLV mapping attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* valid entries in @actions */
	struct tc_action **actions;	/* array sized TCA_ACT_MAX_PRIO,
					 * allocated by tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
159
/* Initialize @exts and allocate its action array (TCA_ACT_MAX_PRIO
 * entries).  @action/@police are the classifier-specific TLV types to
 * export.  Returns 0 on success or -ENOMEM if allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
174
22dc13c8
WC
/* Append every action of @exts to the list @actions.
 * No-op when CONFIG_NET_CLS_ACT is disabled.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}
188
d897a638
JK
/* Propagate byte/packet counters and last-use timestamp to all actions
 * of @exts.  Preemption is disabled around the loop -- presumably
 * because tcf_action_stats_update() touches per-CPU state; confirm at
 * the callee.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
207
3bcc0cec
JP
208/**
209 * tcf_exts_has_actions - check if at least one action is present
210 * @exts: tc filter extensions handle
211 *
212 * Returns true if at least one action is present.
213 */
214static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
215{
2734437e 216#ifdef CONFIG_NET_CLS_ACT
3bcc0cec
JP
217 return exts->nr_actions;
218#else
219 return false;
220#endif
221}
2734437e 222
3bcc0cec
JP
/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	/* No actions can exist without NET_CLS_ACT. */
	return false;
#endif
}
2734437e 237
af69afc5
JP
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* No actions configured in this build: report normal execution. */
	return TC_ACT_OK;
}
258
5c15257f
JP
259int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
260 struct nlattr **tb, struct nlattr *rate_tlv,
2f7ef2f8 261 struct tcf_exts *exts, bool ovr);
18d0264f 262void tcf_exts_destroy(struct tcf_exts *exts);
9b0d4446 263void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
5da57f42
WC
264int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
265int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
1da177e4
LT
266
/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: pointer into the packet data examined by the classifier
 * @nexthdr: next-header bookkeeping maintained by ematch users
 *	(exact semantics are defined by the consumers, not here)
 */
struct tcf_pkt_info {
	unsigned char *	ptr;
	int nexthdr;
};
274
275#ifdef CONFIG_NET_EMATCH
276
277struct tcf_ematch_ops;
278
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace (presumably where the match was configured --
 *	confirm at tcf_em_tree_validate())
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};
296
/* A container ematch has no ops; it only groups other matches. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Nonzero if the match was configured with TCF_EM_SIMPLE. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Nonzero if the match result must be inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* Nonzero if this is the final match in the relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
316
317static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
318{
319 if (tcf_em_last_match(em))
320 return 1;
321
322 if (result == 0 && em->flags & TCF_EM_REL_AND)
323 return 1;
324
325 if (result != 0 && em->flags & TCF_EM_REL_OR)
326 return 1;
327
328 return 0;
329}
330
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (presumably @hdr.nmatches entries --
 *	confirm at __tcf_em_tree_match())
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch * matches;

};
342
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int kind;
	int datalen;
	int (*change)(struct net *net, void *,
		      int, struct tcf_ematch *);
	int (*match)(struct sk_buff *, struct tcf_ematch *,
		     struct tcf_pkt_info *);
	void (*destroy)(struct tcf_ematch *);
	int (*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module *owner;
	struct list_head link;
};
367
5c15257f
JP
368int tcf_em_register(struct tcf_ematch_ops *);
369void tcf_em_unregister(struct tcf_ematch_ops *);
370int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
371 struct tcf_ematch_tree *);
82a470f1 372void tcf_em_tree_destroy(struct tcf_ematch_tree *);
5c15257f
JP
373int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
374int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
375 struct tcf_pkt_info *);
1da177e4 376
1da177e4
LT
377/**
378 * tcf_em_tree_match - evaulate an ematch tree
379 *
380 * @skb: socket buffer of the packet in question
381 * @tree: ematch tree to be used for evaluation
382 * @info: packet information examined by classifier
383 *
384 * This function matches @skb against the ematch tree in @tree by going
385 * through all ematches respecting their logic relations returning
386 * as soon as the result is obvious.
387 *
388 * Returns 1 if the ematch tree as-one matches, no ematches are configured
389 * or ematch is not enabled in the kernel, otherwise 0 is returned.
390 */
391static inline int tcf_em_tree_match(struct sk_buff *skb,
392 struct tcf_ematch_tree *tree,
393 struct tcf_pkt_info *info)
394{
395 if (tree->hdr.nmatches)
396 return __tcf_em_tree_match(skb, tree, info);
397 else
398 return 1;
399}
400
db3d99c0
PM
401#define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
402
1da177e4
LT
403#else /* CONFIG_NET_EMATCH */
404
/* CONFIG_NET_EMATCH disabled: empty tree type plus no-op stand-ins.
 * Validation and matching trivially succeed; destroy and dump do
 * nothing.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
412
413#endif /* CONFIG_NET_EMATCH */
414
/* Return a pointer to the start of @layer's header within @skb, or
 * NULL for an unknown layer.
 */
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb->data;
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}
428
eddc9ec5
ACM
/* Check that [ptr, ptr + len) lies within the linear data of @skb.
 * The trailing (ptr <= ptr + len) comparison rejects pointer
 * wraparound for very large @len values.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
436
437#ifdef CONFIG_NET_CLS_IND
0eeb8ffc
DL
438#include <net/net_namespace.h>
439
/* Resolve an interface-name netlink attribute to an ifindex in @net.
 * Returns the ifindex, -EINVAL on a malformed/overlong name, or
 * -ENODEV when no such device exists.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
453
2519a602
WC
454static inline bool
455tcf_match_indev(struct sk_buff *skb, int ifindex)
1da177e4 456{
2519a602
WC
457 if (!ifindex)
458 return true;
459 if (!skb->skb_iif)
460 return false;
461 return ifindex == skb->skb_iif;
1da177e4
LT
462}
463#endif /* CONFIG_NET_CLS_IND */
464
717503b9
JP
465int tc_setup_cb_call(struct tcf_exts *exts, enum tc_setup_type type,
466 void *type_data, bool err_stop);
467
8c4083b3
JP
/* Command carried by a block bind/unbind offload request. */
enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

/* Offload request for (un)binding a tcf_block at @binder_type. */
struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};
478
/* Fields common to all classifier offload requests; filled in by
 * tc_cls_common_offload_init() from the tcf_proto instance.
 */
struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	u32 classid;
};
485
/* Populate the common offload header from the classifier instance @tp. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	cls_common->classid = tp->classid;
}
495
a1b7c5fd
JF
/* Offloaded view of a u32 classifier key node. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* Offloaded view of a u32 classifier hash node. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* u32 classifier offload request; @command selects the valid union
 * member (knode for *_KNODE commands, hnode for *_HNODE commands).
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
530
7b06e8ae 531static inline bool tc_can_offload(const struct net_device *dev)
6843e7a2 532{
2b6ab0d3
JF
533 if (!(dev->features & NETIF_F_HW_TC))
534 return false;
9e8ce79c
JF
535 if (!dev->netdev_ops->ndo_setup_tc)
536 return false;
9e8ce79c 537 return true;
6843e7a2
JF
538}
539
55330f05
HHZ
/* True when the filter was added with TCA_CLS_FLAGS_SKIP_HW set. */
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) != 0;
}
544
7b06e8ae 545static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
55330f05
HHZ
546{
547 if (tc_skip_hw(flags))
548 return false;
7b06e8ae 549 return tc_can_offload(dev);
55330f05
HHZ
550}
551
d34e3e18
SS
/* True when the filter was added with TCA_CLS_FLAGS_SKIP_SW set. */
static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) != 0;
}
556
/* SKIP_HW and SKIP_SW are mutually exclusive flags; no other flag bits
 * may be set by the user.
 */
static inline bool tc_flags_valid(u32 flags)
{
	const u32 skip_mask = TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;

	if (flags & ~skip_mask)
		return false;

	/* Requesting both skips would leave nowhere to run the filter. */
	return (flags & skip_mask) != skip_mask;
}
568
e696028a
OG
/* True when the filter carries TCA_CLS_FLAGS_IN_HW, i.e. it has been
 * reported as resident in hardware.
 */
static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) != 0;
}
573
5b33f488
AV
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Flower classifier offload request; @cookie is an opaque identifier
 * for the filter instance.
 */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};
589
b87f7936
YG
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* matchall classifier offload request; @cookie is an opaque identifier
 * for the filter instance.
 */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};
601
332ae8e2
JK
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* cls_bpf offload request.  @gen_flags carries generic TCA_CLS_FLAGS_*
 * bits; @exts_integrated presumably mirrors the filter's direct-action
 * mode -- confirm against cls_bpf.
 */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};
618
4e8b86c0
AN
/* Extended mqprio offload parameters: mode/shaper/flags plus per-queue
 * min/max rates layered on top of the basic tc_mqprio_qopt.
 */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
1045ba77
JHS
628
/* Opaque cookie passed from user space to the kernel for actions and
 * classifiers.
 */
struct tc_cookie {
	u8 *data;	/* user-supplied bytes */
	u32 len;	/* number of valid bytes at @data */
};
1da177e4 636#endif