1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/cls_flower.c Flower classifier
5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
18 #include <linux/mpls.h>
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
27 #include <net/dst_metadata.h>
29 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 struct flow_dissector_key_meta meta;
33 struct flow_dissector_key_control control;
34 struct flow_dissector_key_control enc_control;
35 struct flow_dissector_key_basic basic;
36 struct flow_dissector_key_eth_addrs eth;
37 struct flow_dissector_key_vlan vlan;
38 struct flow_dissector_key_vlan cvlan;
40 struct flow_dissector_key_ipv4_addrs ipv4;
41 struct flow_dissector_key_ipv6_addrs ipv6;
43 struct flow_dissector_key_ports tp;
44 struct flow_dissector_key_icmp icmp;
45 struct flow_dissector_key_arp arp;
46 struct flow_dissector_key_keyid enc_key_id;
48 struct flow_dissector_key_ipv4_addrs enc_ipv4;
49 struct flow_dissector_key_ipv6_addrs enc_ipv6;
51 struct flow_dissector_key_ports enc_tp;
52 struct flow_dissector_key_mpls mpls;
53 struct flow_dissector_key_tcp tcp;
54 struct flow_dissector_key_ip ip;
55 struct flow_dissector_key_ip enc_ip;
56 struct flow_dissector_key_enc_opts enc_opts;
57 struct flow_dissector_key_ports tp_min;
58 struct flow_dissector_key_ports tp_max;
59 struct flow_dissector_key_ct ct;
60 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
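/* The long alignment above matters: fl_set_masked_key() and
 * fl_mask_fits_tmplt() below walk the key sizeof(long) bytes at a time,
 * so the key must be padded to a multiple of BITS_PER_LONG / 8.
 */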
62 struct fl_flow_mask_range {
63 unsigned short int start;
64 unsigned short int end;
68 struct fl_flow_key key;
69 struct fl_flow_mask_range range;
71 struct rhash_head ht_node;
73 struct rhashtable_params filter_ht_params;
74 struct flow_dissector dissector;
75 struct list_head filters;
76 struct rcu_work rwork;
77 struct list_head list;
81 struct fl_flow_tmplt {
82 struct fl_flow_key dummy_key;
83 struct fl_flow_key mask;
84 struct flow_dissector dissector;
85 struct tcf_chain *chain;
90 spinlock_t masks_lock; /* Protect masks list */
91 struct list_head masks;
92 struct list_head hw_filters;
93 struct rcu_work rwork;
94 struct idr handle_idr;
97 struct cls_fl_filter {
98 struct fl_flow_mask *mask;
99 struct rhash_head ht_node;
100 struct fl_flow_key mkey;
101 struct tcf_exts exts;
102 struct tcf_result res;
103 struct fl_flow_key key;
104 struct list_head list;
105 struct list_head hw_list;
109 struct rcu_work rwork;
110 struct net_device *hw_dev;
111 /* Flower classifier is unlocked, which means that its reference counter
112 * can be changed concurrently without any kind of external
113 * synchronization. Use atomic reference counter to be concurrency-safe.
119 static const struct rhashtable_params mask_ht_params = {
120 .key_offset = offsetof(struct fl_flow_mask, key),
121 .key_len = sizeof(struct fl_flow_key),
122 .head_offset = offsetof(struct fl_flow_mask, ht_node),
123 .automatic_shrinking = true,
126 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
128 return mask->range.end - mask->range.start;
131 static void fl_mask_update_range(struct fl_flow_mask *mask)
133 const u8 *bytes = (const u8 *) &mask->key;
134 size_t size = sizeof(mask->key);
135 size_t i, first = 0, last;
137 for (i = 0; i < size; i++) {
144 for (i = size - 1; i != first; i--) {
150 mask->range.start = rounddown(first, sizeof(long));
151 mask->range.end = roundup(last + 1, sizeof(long));
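/* Example (assuming 8-byte longs): if the first non-zero mask byte is at
 * offset 3 and the last at offset 9, the range becomes [0, 16), i.e. the
 * smallest long-aligned window covering every byte the mask cares about.
 */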
154 static void *fl_key_get_start(struct fl_flow_key *key,
155 const struct fl_flow_mask *mask)
157 return (u8 *) key + mask->range.start;
160 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
161 struct fl_flow_mask *mask)
163 const long *lkey = fl_key_get_start(key, mask);
164 const long *lmask = fl_key_get_start(&mask->key, mask);
165 long *lmkey = fl_key_get_start(mkey, mask);
168 for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
169 *lmkey++ = *lkey++ & *lmask++;
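/* The masked key is simply (packet key & mask), computed one long at a
 * time over the window established by fl_mask_update_range(); bytes
 * outside that window never contribute to the hash lookup.
 */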
172 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
173 struct fl_flow_mask *mask)
175 const long *lmask = fl_key_get_start(&mask->key, mask);
181 ltmplt = fl_key_get_start(&tmplt->mask, mask);
182 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
183 if (~*ltmplt++ & *lmask++)
189 static void fl_clear_masked_range(struct fl_flow_key *key,
190 struct fl_flow_mask *mask)
192 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
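/* Port ranges cannot be expressed as a plain bitmask, so filters using
 * TCA_FLOWER_KEY_PORT_*_MIN/MAX are walked explicitly by fl_lookup_range()
 * via the two helpers below: if the packet's port falls inside
 * [min_val, max_val], the filter's own min/max values are copied into the
 * lookup key so the rhashtable lookup can still resolve to this filter.
 */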
195 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
196 struct fl_flow_key *key,
197 struct fl_flow_key *mkey)
199 __be16 min_mask, max_mask, min_val, max_val;
201 min_mask = htons(filter->mask->key.tp_min.dst);
202 max_mask = htons(filter->mask->key.tp_max.dst);
203 min_val = htons(filter->key.tp_min.dst);
204 max_val = htons(filter->key.tp_max.dst);
206 if (min_mask && max_mask) {
207 if (htons(key->tp.dst) < min_val ||
208 htons(key->tp.dst) > max_val)
211 /* skb does not have min and max values */
212 mkey->tp_min.dst = filter->mkey.tp_min.dst;
213 mkey->tp_max.dst = filter->mkey.tp_max.dst;
218 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
219 struct fl_flow_key *key,
220 struct fl_flow_key *mkey)
222 __be16 min_mask, max_mask, min_val, max_val;
224 min_mask = htons(filter->mask->key.tp_min.src);
225 max_mask = htons(filter->mask->key.tp_max.src);
226 min_val = htons(filter->key.tp_min.src);
227 max_val = htons(filter->key.tp_max.src);
229 if (min_mask && max_mask) {
230 if (htons(key->tp.src) < min_val ||
231 htons(key->tp.src) > max_val)
234 /* skb does not have min and max values */
235 mkey->tp_min.src = filter->mkey.tp_min.src;
236 mkey->tp_max.src = filter->mkey.tp_max.src;
241 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
242 struct fl_flow_key *mkey)
244 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
245 mask->filter_ht_params);
248 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
249 struct fl_flow_key *mkey,
250 struct fl_flow_key *key)
252 struct cls_fl_filter *filter, *f;
254 list_for_each_entry_rcu(filter, &mask->filters, list) {
255 if (!fl_range_port_dst_cmp(filter, key, mkey))
258 if (!fl_range_port_src_cmp(filter, key, mkey))
261 f = __fl_lookup(mask, mkey);
268 static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
269 struct fl_flow_key *mkey,
270 struct fl_flow_key *key)
272 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
273 return fl_lookup_range(mask, mkey, key);
275 return __fl_lookup(mask, mkey);
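/* The map below is indexed by enum ip_conntrack_info; original and reply
 * directions of established/related connections intentionally map to the
 * same flower CT flags.
 */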
278 static u16 fl_ct_info_to_flower_map[] = {
279 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
280 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
281 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
282 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
283 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
284 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
285 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
286 TCA_FLOWER_KEY_CT_FLAGS_RELATED,
287 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
288 TCA_FLOWER_KEY_CT_FLAGS_NEW,
291 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
292 struct tcf_result *res)
294 struct cls_fl_head *head = rcu_dereference_bh(tp->root);
295 struct fl_flow_key skb_mkey;
296 struct fl_flow_key skb_key;
297 struct fl_flow_mask *mask;
298 struct cls_fl_filter *f;
300 list_for_each_entry_rcu(mask, &head->masks, list) {
301 fl_clear_masked_range(&skb_key, mask);
303 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
304 /* skb_flow_dissect() does not set n_proto in case of an unknown
305 * protocol, so set it here instead.
306 */
307 skb_key.basic.n_proto = skb->protocol;
308 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
309 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
310 fl_ct_info_to_flower_map,
311 ARRAY_SIZE(fl_ct_info_to_flower_map));
312 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
314 fl_set_masked_key(&skb_mkey, &skb_key, mask);
316 f = fl_lookup(mask, &skb_mkey, &skb_key);
317 if (f && !tc_skip_sw(f->flags)) {
319 return tcf_exts_exec(skb, &f->exts, res);
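/* Masks are tried in list order: for each mask the skb is dissected only
 * for the keys that mask uses, then hashed against that mask's filter
 * table. A hit that is marked skip_sw is ignored in this software path
 * and the next mask is tried.
 */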
325 static int fl_init(struct tcf_proto *tp)
327 struct cls_fl_head *head;
329 head = kzalloc(sizeof(*head), GFP_KERNEL);
333 spin_lock_init(&head->masks_lock);
334 INIT_LIST_HEAD_RCU(&head->masks);
335 INIT_LIST_HEAD(&head->hw_filters);
336 rcu_assign_pointer(tp->root, head);
337 idr_init(&head->handle_idr);
339 return rhashtable_init(&head->ht, &mask_ht_params);
342 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
344 /* temporary masks don't have their filters list and ht initialized */
345 if (mask_init_done) {
346 WARN_ON(!list_empty(&mask->filters));
347 rhashtable_destroy(&mask->ht);
352 static void fl_mask_free_work(struct work_struct *work)
354 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
355 struct fl_flow_mask, rwork);
357 fl_mask_free(mask, true);
360 static void fl_uninit_mask_free_work(struct work_struct *work)
362 struct fl_flow_mask *mask = container_of(to_rcu_work(work),
363 struct fl_flow_mask, rwork);
365 fl_mask_free(mask, false);
368 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
370 if (!refcount_dec_and_test(&mask->refcnt))
373 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
375 spin_lock(&head->masks_lock);
376 list_del_rcu(&mask->list);
377 spin_unlock(&head->masks_lock);
379 tcf_queue_work(&mask->rwork, fl_mask_free_work);
384 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
386 /* Flower classifier only changes root pointer during init and destroy.
387 * Users must obtain reference to tcf_proto instance before calling its
388 * API, so tp->root pointer is protected from concurrent call to
389 * fl_destroy() by reference counting.
391 return rcu_dereference_raw(tp->root);
394 static void __fl_destroy_filter(struct cls_fl_filter *f)
396 tcf_exts_destroy(&f->exts);
397 tcf_exts_put_net(&f->exts);
401 static void fl_destroy_filter_work(struct work_struct *work)
403 struct cls_fl_filter *f = container_of(to_rcu_work(work),
404 struct cls_fl_filter, rwork);
406 __fl_destroy_filter(f);
409 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
410 bool rtnl_held, struct netlink_ext_ack *extack)
412 struct tcf_block *block = tp->chain->block;
413 struct flow_cls_offload cls_flower = {};
418 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
419 cls_flower.command = FLOW_CLS_DESTROY;
420 cls_flower.cookie = (unsigned long) f;
422 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
423 &f->flags, &f->in_hw_count, true);
424 spin_lock(&tp->lock);
425 list_del_init(&f->hw_list);
426 spin_unlock(&tp->lock);
432 static int fl_hw_replace_filter(struct tcf_proto *tp,
433 struct cls_fl_filter *f, bool rtnl_held,
434 struct netlink_ext_ack *extack)
436 struct cls_fl_head *head = fl_head_dereference(tp);
437 struct tcf_block *block = tp->chain->block;
438 struct flow_cls_offload cls_flower = {};
439 bool skip_sw = tc_skip_sw(f->flags);
445 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
446 if (!cls_flower.rule) {
451 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
452 cls_flower.command = FLOW_CLS_REPLACE;
453 cls_flower.cookie = (unsigned long) f;
454 cls_flower.rule->match.dissector = &f->mask->dissector;
455 cls_flower.rule->match.mask = &f->mask->key;
456 cls_flower.rule->match.key = &f->mkey;
457 cls_flower.classid = f->res.classid;
459 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
461 kfree(cls_flower.rule);
463 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
469 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
470 skip_sw, &f->flags, &f->in_hw_count, true);
471 kfree(cls_flower.rule);
474 fl_hw_destroy_filter(tp, f, true, NULL);
478 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
483 spin_lock(&tp->lock);
484 list_add(&f->hw_list, &head->hw_filters);
485 spin_unlock(&tp->lock);
493 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
496 struct tcf_block *block = tp->chain->block;
497 struct flow_cls_offload cls_flower = {};
502 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
503 cls_flower.command = FLOW_CLS_STATS;
504 cls_flower.cookie = (unsigned long) f;
505 cls_flower.classid = f->res.classid;
507 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
509 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
510 cls_flower.stats.pkts,
511 cls_flower.stats.lastused);
517 static void __fl_put(struct cls_fl_filter *f)
519 if (!refcount_dec_and_test(&f->refcnt))
522 if (tcf_exts_get_net(&f->exts))
523 tcf_queue_work(&f->rwork, fl_destroy_filter_work);
525 __fl_destroy_filter(f);
528 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
530 struct cls_fl_filter *f;
533 f = idr_find(&head->handle_idr, handle);
534 if (f && !refcount_inc_not_zero(&f->refcnt))
541 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
542 bool *last, bool rtnl_held,
543 struct netlink_ext_ack *extack)
545 struct cls_fl_head *head = fl_head_dereference(tp);
549 spin_lock(&tp->lock);
551 spin_unlock(&tp->lock);
556 rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
557 f->mask->filter_ht_params);
558 idr_remove(&head->handle_idr, f->handle);
559 list_del_rcu(&f->list);
560 spin_unlock(&tp->lock);
562 *last = fl_mask_put(head, f->mask);
563 if (!tc_skip_hw(f->flags))
564 fl_hw_destroy_filter(tp, f, rtnl_held, extack);
565 tcf_unbind_filter(tp, &f->res);
571 static void fl_destroy_sleepable(struct work_struct *work)
573 struct cls_fl_head *head = container_of(to_rcu_work(work),
577 rhashtable_destroy(&head->ht);
579 module_put(THIS_MODULE);
582 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
583 struct netlink_ext_ack *extack)
585 struct cls_fl_head *head = fl_head_dereference(tp);
586 struct fl_flow_mask *mask, *next_mask;
587 struct cls_fl_filter *f, *next;
590 list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
591 list_for_each_entry_safe(f, next, &mask->filters, list) {
592 __fl_delete(tp, f, &last, rtnl_held, extack);
597 idr_destroy(&head->handle_idr);
599 __module_get(THIS_MODULE);
600 tcf_queue_work(&head->rwork, fl_destroy_sleepable);
603 static void fl_put(struct tcf_proto *tp, void *arg)
605 struct cls_fl_filter *f = arg;
610 static void *fl_get(struct tcf_proto *tp, u32 handle)
612 struct cls_fl_head *head = fl_head_dereference(tp);
614 return __fl_get(head, handle);
617 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
618 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
619 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
620 [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
622 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
623 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
624 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
625 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
626 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
627 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
628 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
629 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
630 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
631 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
632 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
633 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
634 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
635 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
636 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
637 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
638 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
639 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
640 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
641 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
642 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
643 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
644 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
645 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
646 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
647 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
648 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
649 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
650 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
651 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
652 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
653 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
654 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
655 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
656 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
657 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
658 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
659 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
660 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
661 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
662 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
663 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
664 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
665 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
666 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
667 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
668 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
669 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
670 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
671 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
672 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
673 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
674 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
675 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
676 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
677 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
678 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
679 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
680 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
681 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
682 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
683 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
684 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
685 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
686 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
687 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
688 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
689 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
690 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
691 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
692 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
693 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
694 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
695 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
696 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
697 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
698 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
699 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
700 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
701 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
702 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
703 [TCA_FLOWER_KEY_CT_STATE] = { .type = NLA_U16 },
704 [TCA_FLOWER_KEY_CT_STATE_MASK] = { .type = NLA_U16 },
705 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
706 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
707 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
708 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
709 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
710 .len = 128 / BITS_PER_BYTE },
711 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
712 .len = 128 / BITS_PER_BYTE },
715 static const struct nla_policy
716 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
717 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
720 static const struct nla_policy
721 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
722 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
723 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
724 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
728 static void fl_set_key_val(struct nlattr **tb,
729 void *val, int val_type,
730 void *mask, int mask_type, int len)
734 nla_memcpy(val, tb[val_type], len);
735 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
736 memset(mask, 0xff, len);
738 nla_memcpy(mask, tb[mask_type], len);
741 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
742 struct fl_flow_key *mask)
744 fl_set_key_val(tb, &key->tp_min.dst,
745 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
746 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
747 fl_set_key_val(tb, &key->tp_max.dst,
748 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
749 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
750 fl_set_key_val(tb, &key->tp_min.src,
751 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
752 TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
753 fl_set_key_val(tb, &key->tp_max.src,
754 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
755 TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
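/* Sanity check below: when both ends of a range are given, max must be
 * strictly greater than min (compared in host byte order), otherwise the
 * range is rejected.
 */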
757 if ((mask->tp_min.dst && mask->tp_max.dst &&
758 htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
759 (mask->tp_min.src && mask->tp_max.src &&
760 htons(key->tp_max.src) <= htons(key->tp_min.src)))
766 static int fl_set_key_mpls(struct nlattr **tb,
767 struct flow_dissector_key_mpls *key_val,
768 struct flow_dissector_key_mpls *key_mask)
770 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
771 key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
772 key_mask->mpls_ttl = MPLS_TTL_MASK;
774 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
775 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
777 if (bos & ~MPLS_BOS_MASK)
779 key_val->mpls_bos = bos;
780 key_mask->mpls_bos = MPLS_BOS_MASK;
782 if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
783 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
785 if (tc & ~MPLS_TC_MASK)
787 key_val->mpls_tc = tc;
788 key_mask->mpls_tc = MPLS_TC_MASK;
790 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
791 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
793 if (label & ~MPLS_LABEL_MASK)
795 key_val->mpls_label = label;
796 key_mask->mpls_label = MPLS_LABEL_MASK;
801 static void fl_set_key_vlan(struct nlattr **tb,
803 int vlan_id_key, int vlan_prio_key,
804 struct flow_dissector_key_vlan *key_val,
805 struct flow_dissector_key_vlan *key_mask)
807 #define VLAN_PRIORITY_MASK 0x7
809 if (tb[vlan_id_key]) {
811 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
812 key_mask->vlan_id = VLAN_VID_MASK;
814 if (tb[vlan_prio_key]) {
815 key_val->vlan_priority =
816 nla_get_u8(tb[vlan_prio_key]) &
818 key_mask->vlan_priority = VLAN_PRIORITY_MASK;
820 key_val->vlan_tpid = ethertype;
821 key_mask->vlan_tpid = cpu_to_be16(~0);
824 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
825 u32 *dissector_key, u32 *dissector_mask,
826 u32 flower_flag_bit, u32 dissector_flag_bit)
828 if (flower_mask & flower_flag_bit) {
829 *dissector_mask |= dissector_flag_bit;
830 if (flower_key & flower_flag_bit)
831 *dissector_key |= dissector_flag_bit;
835 static int fl_set_key_flags(struct nlattr **tb,
836 u32 *flags_key, u32 *flags_mask)
840 /* mask is mandatory for flags */
841 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
844 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
845 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
850 fl_set_key_flag(key, mask, flags_key, flags_mask,
851 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
852 fl_set_key_flag(key, mask, flags_key, flags_mask,
853 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
854 FLOW_DIS_FIRST_FRAG);
859 static void fl_set_key_ip(struct nlattr **tb, bool encap,
860 struct flow_dissector_key_ip *key,
861 struct flow_dissector_key_ip *mask)
863 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
864 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
865 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
866 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
868 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
869 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
872 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
873 int depth, int option_len,
874 struct netlink_ext_ack *extack)
876 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
877 struct nlattr *class = NULL, *type = NULL, *data = NULL;
878 struct geneve_opt *opt;
879 int err, data_len = 0;
881 if (option_len > sizeof(struct geneve_opt))
882 data_len = option_len - sizeof(struct geneve_opt);
884 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
885 memset(opt, 0xff, option_len);
886 opt->length = data_len / 4;
891 /* If no mask has been provided we assume an exact match. */
893 return sizeof(struct geneve_opt) + data_len;
895 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
896 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
900 err = nla_parse_nested_deprecated(tb,
901 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
902 nla, geneve_opt_policy, extack);
906 /* We are not allowed to omit any of CLASS, TYPE or DATA
907 * fields from the key.
910 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
911 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
912 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
913 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
917 /* Omitting any of CLASS, TYPE or DATA fields is allowed
918 * in a mask.
919 */
920 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
921 int new_len = key->enc_opts.len;
923 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
924 data_len = nla_len(data);
926 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
930 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
934 new_len += sizeof(struct geneve_opt) + data_len;
935 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
936 if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
937 NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
940 opt->length = data_len / 4;
941 memcpy(opt->opt_data, nla_data(data), data_len);
944 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
945 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
946 opt->opt_class = nla_get_be16(class);
949 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
950 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
951 opt->type = nla_get_u8(type);
954 return sizeof(struct geneve_opt) + data_len;
957 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
958 struct fl_flow_key *mask,
959 struct netlink_ext_ack *extack)
961 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
962 int err, option_len, key_depth, msk_depth = 0;
964 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
965 TCA_FLOWER_KEY_ENC_OPTS_MAX,
966 enc_opts_policy, extack);
970 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
972 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
973 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
974 TCA_FLOWER_KEY_ENC_OPTS_MAX,
975 enc_opts_policy, extack);
979 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
980 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
983 nla_for_each_attr(nla_opt_key, nla_enc_key,
984 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
985 switch (nla_type(nla_opt_key)) {
986 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
988 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
989 option_len = fl_set_geneve_opt(nla_opt_key, key,
990 key_depth, option_len,
995 key->enc_opts.len += option_len;
996 /* At the same time we need to parse through the mask
997 * in order to verify exact and mask attribute lengths.
999 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1000 option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1001 msk_depth, option_len,
1006 mask->enc_opts.len += option_len;
1007 if (key->enc_opts.len != mask->enc_opts.len) {
1008 NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1013 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1016 NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1024 static int fl_set_key_ct(struct nlattr **tb,
1025 struct flow_dissector_key_ct *key,
1026 struct flow_dissector_key_ct *mask,
1027 struct netlink_ext_ack *extack)
1029 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1030 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1031 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1034 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1035 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1036 sizeof(key->ct_state));
1038 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1039 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1040 NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1043 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1044 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1045 sizeof(key->ct_zone));
1047 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1048 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1049 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1052 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1053 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1054 sizeof(key->ct_mark));
1056 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1057 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1058 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1061 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1062 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1063 sizeof(key->ct_labels));
1069 static int fl_set_key(struct net *net, struct nlattr **tb,
1070 struct fl_flow_key *key, struct fl_flow_key *mask,
1071 struct netlink_ext_ack *extack)
1076 if (tb[TCA_FLOWER_INDEV]) {
1077 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1080 key->meta.ingress_ifindex = err;
1081 mask->meta.ingress_ifindex = 0xffffffff;
1084 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1085 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1086 sizeof(key->eth.dst));
1087 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1088 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1089 sizeof(key->eth.src));
1091 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1092 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1094 if (eth_type_vlan(ethertype)) {
1095 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1096 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1099 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1100 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1101 if (eth_type_vlan(ethertype)) {
1102 fl_set_key_vlan(tb, ethertype,
1103 TCA_FLOWER_KEY_CVLAN_ID,
1104 TCA_FLOWER_KEY_CVLAN_PRIO,
1105 &key->cvlan, &mask->cvlan);
1106 fl_set_key_val(tb, &key->basic.n_proto,
1107 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1108 &mask->basic.n_proto,
1110 sizeof(key->basic.n_proto));
1112 key->basic.n_proto = ethertype;
1113 mask->basic.n_proto = cpu_to_be16(~0);
1117 key->basic.n_proto = ethertype;
1118 mask->basic.n_proto = cpu_to_be16(~0);
1122 if (key->basic.n_proto == htons(ETH_P_IP) ||
1123 key->basic.n_proto == htons(ETH_P_IPV6)) {
1124 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1125 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1126 sizeof(key->basic.ip_proto));
1127 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1130 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1131 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1132 mask->control.addr_type = ~0;
1133 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1134 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1135 sizeof(key->ipv4.src));
1136 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1137 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1138 sizeof(key->ipv4.dst));
1139 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1140 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1141 mask->control.addr_type = ~0;
1142 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1143 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1144 sizeof(key->ipv6.src));
1145 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1146 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1147 sizeof(key->ipv6.dst));
1150 if (key->basic.ip_proto == IPPROTO_TCP) {
1151 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1152 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1153 sizeof(key->tp.src));
1154 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1155 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1156 sizeof(key->tp.dst));
1157 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1158 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1159 sizeof(key->tcp.flags));
1160 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1161 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1162 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1163 sizeof(key->tp.src));
1164 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1165 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1166 sizeof(key->tp.dst));
1167 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1168 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1169 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1170 sizeof(key->tp.src));
1171 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1172 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1173 sizeof(key->tp.dst));
1174 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1175 key->basic.ip_proto == IPPROTO_ICMP) {
1176 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1178 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1179 sizeof(key->icmp.type));
1180 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1182 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1183 sizeof(key->icmp.code));
1184 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1185 key->basic.ip_proto == IPPROTO_ICMPV6) {
1186 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1188 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1189 sizeof(key->icmp.type));
1190 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1192 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1193 sizeof(key->icmp.code));
1194 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1195 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1196 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1199 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1200 key->basic.n_proto == htons(ETH_P_RARP)) {
1201 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1202 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1203 sizeof(key->arp.sip));
1204 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1205 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1206 sizeof(key->arp.tip));
1207 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1208 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1209 sizeof(key->arp.op));
1210 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1211 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1212 sizeof(key->arp.sha));
1213 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1214 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1215 sizeof(key->arp.tha));
1218 if (key->basic.ip_proto == IPPROTO_TCP ||
1219 key->basic.ip_proto == IPPROTO_UDP ||
1220 key->basic.ip_proto == IPPROTO_SCTP) {
1221 ret = fl_set_key_port_range(tb, key, mask);
1226 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1227 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1228 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1229 mask->enc_control.addr_type = ~0;
1230 fl_set_key_val(tb, &key->enc_ipv4.src,
1231 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1232 &mask->enc_ipv4.src,
1233 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1234 sizeof(key->enc_ipv4.src));
1235 fl_set_key_val(tb, &key->enc_ipv4.dst,
1236 TCA_FLOWER_KEY_ENC_IPV4_DST,
1237 &mask->enc_ipv4.dst,
1238 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1239 sizeof(key->enc_ipv4.dst));
1242 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1243 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1244 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1245 mask->enc_control.addr_type = ~0;
1246 fl_set_key_val(tb, &key->enc_ipv6.src,
1247 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1248 &mask->enc_ipv6.src,
1249 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1250 sizeof(key->enc_ipv6.src));
1251 fl_set_key_val(tb, &key->enc_ipv6.dst,
1252 TCA_FLOWER_KEY_ENC_IPV6_DST,
1253 &mask->enc_ipv6.dst,
1254 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1255 sizeof(key->enc_ipv6.dst));
1258 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1259 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1260 sizeof(key->enc_key_id.keyid));
1262 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1263 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1264 sizeof(key->enc_tp.src));
1266 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1267 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1268 sizeof(key->enc_tp.dst));
1270 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1272 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1273 ret = fl_set_enc_opt(tb, key, mask, extack);
1278 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1282 if (tb[TCA_FLOWER_KEY_FLAGS])
1283 ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1288 static void fl_mask_copy(struct fl_flow_mask *dst,
1289 struct fl_flow_mask *src)
1291 const void *psrc = fl_key_get_start(&src->key, src);
1292 void *pdst = fl_key_get_start(&dst->key, src);
1294 memcpy(pdst, psrc, fl_mask_range(src));
1295 dst->range = src->range;
1298 static const struct rhashtable_params fl_ht_params = {
1299 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1300 .head_offset = offsetof(struct cls_fl_filter, ht_node),
1301 .automatic_shrinking = true,
1304 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1306 mask->filter_ht_params = fl_ht_params;
1307 mask->filter_ht_params.key_len = fl_mask_range(mask);
1308 mask->filter_ht_params.key_offset += mask->range.start;
1310 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
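/* Each mask owns a filter hashtable whose key is only the byte window
 * [range.start, range.end) of the masked key, so filters sharing a mask
 * hash on exactly the bytes that mask can distinguish.
 */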
1313 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1314 #define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
1316 #define FL_KEY_IS_MASKED(mask, member) \
1317 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
1318 0, FL_KEY_MEMBER_SIZE(member)) \
1320 #define FL_KEY_SET(keys, cnt, id, member) \
1322 keys[cnt].key_id = id; \
1323 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
1327 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
1329 if (FL_KEY_IS_MASKED(mask, member)) \
1330 FL_KEY_SET(keys, cnt, id, member); \
1333 static void fl_init_dissector(struct flow_dissector *dissector,
1334 struct fl_flow_key *mask)
1336 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1339 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1340 FLOW_DISSECTOR_KEY_META, meta);
1341 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1342 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1343 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1344 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1345 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1346 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1347 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1348 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1349 if (FL_KEY_IS_MASKED(mask, tp) ||
1350 FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1351 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1352 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1353 FLOW_DISSECTOR_KEY_IP, ip);
1354 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1355 FLOW_DISSECTOR_KEY_TCP, tcp);
1356 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1357 FLOW_DISSECTOR_KEY_ICMP, icmp);
1358 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1359 FLOW_DISSECTOR_KEY_ARP, arp);
1360 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1361 FLOW_DISSECTOR_KEY_MPLS, mpls);
1362 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1363 FLOW_DISSECTOR_KEY_VLAN, vlan);
1364 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1365 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1366 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1367 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1368 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1369 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1370 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1371 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1372 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1373 FL_KEY_IS_MASKED(mask, enc_ipv6))
1374 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1376 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1377 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1378 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1379 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1380 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1381 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1382 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1383 FLOW_DISSECTOR_KEY_CT, ct);
1385 skb_flow_dissector_init(dissector, keys, cnt);
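/* The per-mask dissector built above carries only the keys that are
 * actually masked (CONTROL and BASIC are always included), so the
 * fast-path dissection in fl_classify() does no more work than the
 * installed filters require.
 */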
1388 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1389 struct fl_flow_mask *mask)
1391 struct fl_flow_mask *newmask;
1394 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1396 return ERR_PTR(-ENOMEM);
1398 fl_mask_copy(newmask, mask);
1400 if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1401 (newmask->key.tp_min.src && newmask->key.tp_max.src))
1402 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
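/* Flagging the mask here makes fl_lookup() take the slower
 * fl_lookup_range() path for every filter that uses this mask.
 */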
1404 err = fl_init_mask_hashtable(newmask);
1408 fl_init_dissector(&newmask->dissector, &newmask->key);
1410 INIT_LIST_HEAD_RCU(&newmask->filters);
1412 refcount_set(&newmask->refcnt, 1);
1413 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1414 &newmask->ht_node, mask_ht_params);
1416 goto errout_destroy;
1418 spin_lock(&head->masks_lock);
1419 list_add_tail_rcu(&newmask->list, &head->masks);
1420 spin_unlock(&head->masks_lock);
1425 rhashtable_destroy(&newmask->ht);
1429 return ERR_PTR(err);
1432 static int fl_check_assign_mask(struct cls_fl_head *head,
1433 struct cls_fl_filter *fnew,
1434 struct cls_fl_filter *fold,
1435 struct fl_flow_mask *mask)
1437 struct fl_flow_mask *newmask;
1442 /* Insert mask as temporary node to prevent concurrent creation of mask
1443 * with same key. Any concurrent lookups with same key will return
1444 * -EAGAIN because mask's refcnt is zero.
1446 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1454 goto errout_cleanup;
1457 newmask = fl_create_new_mask(head, mask);
1458 if (IS_ERR(newmask)) {
1459 ret = PTR_ERR(newmask);
1460 goto errout_cleanup;
1463 fnew->mask = newmask;
1465 } else if (IS_ERR(fnew->mask)) {
1466 ret = PTR_ERR(fnew->mask);
1467 } else if (fold && fold->mask != fnew->mask) {
1469 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1470 /* Mask was deleted concurrently, try again */
1477 rhashtable_remove_fast(&head->ht, &mask->ht_node,
1482 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1483 struct cls_fl_filter *f, struct fl_flow_mask *mask,
1484 unsigned long base, struct nlattr **tb,
1485 struct nlattr *est, bool ovr,
1486 struct fl_flow_tmplt *tmplt, bool rtnl_held,
1487 struct netlink_ext_ack *extack)
1491 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1496 if (tb[TCA_FLOWER_CLASSID]) {
1497 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1500 tcf_bind_filter(tp, &f->res, base);
1505 err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1509 fl_mask_update_range(mask);
1510 fl_set_masked_key(&f->mkey, &f->key, mask);
1512 if (!fl_mask_fits_tmplt(tmplt, mask)) {
1513 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1520 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1521 struct cls_fl_filter *fold,
1524 struct fl_flow_mask *mask = fnew->mask;
1527 err = rhashtable_lookup_insert_fast(&mask->ht,
1529 mask->filter_ht_params);
1532 /* It is okay if a filter with the same key exists when
1533 * overwriting.
1534 */
1535 return fold && err == -EEXIST ? 0 : err;
1542 static int fl_change(struct net *net, struct sk_buff *in_skb,
1543 struct tcf_proto *tp, unsigned long base,
1544 u32 handle, struct nlattr **tca,
1545 void **arg, bool ovr, bool rtnl_held,
1546 struct netlink_ext_ack *extack)
1548 struct cls_fl_head *head = fl_head_dereference(tp);
1549 struct cls_fl_filter *fold = *arg;
1550 struct cls_fl_filter *fnew;
1551 struct fl_flow_mask *mask;
1556 if (!tca[TCA_OPTIONS]) {
1561 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1567 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1570 goto errout_mask_alloc;
1573 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1574 tca[TCA_OPTIONS], fl_policy, NULL);
1578 if (fold && handle && fold->handle != handle) {
1583 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1588 INIT_LIST_HEAD(&fnew->hw_list);
1589 refcount_set(&fnew->refcnt, 1);
1591 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1595 if (tb[TCA_FLOWER_FLAGS]) {
1596 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1598 if (!tc_flags_valid(fnew->flags)) {
1604 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1605 tp->chain->tmplt_priv, rtnl_held, extack);
1609 err = fl_check_assign_mask(head, fnew, fold, mask);
1613 err = fl_ht_insert_unique(fnew, fold, &in_ht);
1617 if (!tc_skip_hw(fnew->flags)) {
1618 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1623 if (!tc_in_hw(fnew->flags))
1624 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1626 spin_lock(&tp->lock);
1628 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1629 * proto again or create new one, if necessary.
1637 /* Fold filter was deleted concurrently. Retry lookup. */
1638 if (fold->deleted) {
1643 fnew->handle = handle;
1646 struct rhashtable_params params =
1647 fnew->mask->filter_ht_params;
1649 err = rhashtable_insert_fast(&fnew->mask->ht,
1657 refcount_inc(&fnew->refcnt);
1658 rhashtable_remove_fast(&fold->mask->ht,
1660 fold->mask->filter_ht_params);
1661 idr_replace(&head->handle_idr, fnew, fnew->handle);
1662 list_replace_rcu(&fold->list, &fnew->list);
1663 fold->deleted = true;
1665 spin_unlock(&tp->lock);
1667 fl_mask_put(head, fold->mask);
1668 if (!tc_skip_hw(fold->flags))
1669 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1670 tcf_unbind_filter(tp, &fold->res);
1671 /* Caller holds reference to fold, so refcnt is always > 0
1672 * after this.
1673 */
1674 refcount_dec(&fold->refcnt);
1678 /* user specifies a handle and it doesn't exist */
1679 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1680 handle, GFP_ATOMIC);
1682 /* Filter with specified handle was concurrently
1683 * inserted after initial check in cls_api. This is not
1684 * necessarily an error if NLM_F_EXCL is not set in
1685 * message flags. Returning EAGAIN will cause cls_api to
1686 * try to update concurrently inserted rule.
1692 err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1693 INT_MAX, GFP_ATOMIC);
1698 refcount_inc(&fnew->refcnt);
1699 fnew->handle = handle;
1700 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1701 spin_unlock(&tp->lock);
1707 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1711 spin_lock(&tp->lock);
1713 fnew->deleted = true;
1714 spin_unlock(&tp->lock);
1715 if (!tc_skip_hw(fnew->flags))
1716 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1718 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1719 fnew->mask->filter_ht_params);
1721 fl_mask_put(head, fnew->mask);
1727 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1734 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1735 bool rtnl_held, struct netlink_ext_ack *extack)
1737 struct cls_fl_head *head = fl_head_dereference(tp);
1738 struct cls_fl_filter *f = arg;
1742 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1743 *last = list_empty(&head->masks);
1749 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1752 struct cls_fl_head *head = fl_head_dereference(tp);
1753 unsigned long id = arg->cookie, tmp;
1754 struct cls_fl_filter *f;
1756 arg->count = arg->skip;
1758 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1759 /* don't return filters that are being deleted */
1760 if (!refcount_inc_not_zero(&f->refcnt))
1762 if (arg->fn(tp, f, arg) < 0) {
1773 static struct cls_fl_filter *
1774 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1776 struct cls_fl_head *head = fl_head_dereference(tp);
1778 spin_lock(&tp->lock);
1779 if (list_empty(&head->hw_filters)) {
1780 spin_unlock(&tp->lock);
1785 f = list_entry(&head->hw_filters, struct cls_fl_filter,
1787 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1788 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1789 spin_unlock(&tp->lock);
1794 spin_unlock(&tp->lock);
1798 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1799 void *cb_priv, struct netlink_ext_ack *extack)
1801 struct tcf_block *block = tp->chain->block;
1802 struct flow_cls_offload cls_flower = {};
1803 struct cls_fl_filter *f = NULL;
1806 /* hw_filters list can only be changed by hw offload functions after
1807 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1808 * iterating it.
1809 */
1812 while ((f = fl_get_next_hw_filter(tp, f, add))) {
1814 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1815 if (!cls_flower.rule) {
1820 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1822 cls_flower.command = add ?
1823 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1824 cls_flower.cookie = (unsigned long)f;
1825 cls_flower.rule->match.dissector = &f->mask->dissector;
1826 cls_flower.rule->match.mask = &f->mask->key;
1827 cls_flower.rule->match.key = &f->mkey;
1829 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1831 kfree(cls_flower.rule);
1832 if (tc_skip_sw(f->flags)) {
1833 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1840 cls_flower.classid = f->res.classid;
1842 err = tc_setup_cb_reoffload(block, tp, add, cb,
1843 TC_SETUP_CLSFLOWER, &cls_flower,
1846 kfree(cls_flower.rule);
1859 static int fl_hw_create_tmplt(struct tcf_chain *chain,
1860 struct fl_flow_tmplt *tmplt)
1862 struct flow_cls_offload cls_flower = {};
1863 struct tcf_block *block = chain->block;
1865 cls_flower.rule = flow_rule_alloc(0);
1866 if (!cls_flower.rule)
1869 cls_flower.common.chain_index = chain->index;
1870 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
1871 cls_flower.cookie = (unsigned long) tmplt;
1872 cls_flower.rule->match.dissector = &tmplt->dissector;
1873 cls_flower.rule->match.mask = &tmplt->mask;
1874 cls_flower.rule->match.key = &tmplt->dummy_key;
1876 /* We don't care if any driver fails to handle this
1877 * call. It serves just as a hint to the driver.
1878 */
1879 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1880 kfree(cls_flower.rule);
1885 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1886 struct fl_flow_tmplt *tmplt)
1888 struct flow_cls_offload cls_flower = {};
1889 struct tcf_block *block = chain->block;
1891 cls_flower.common.chain_index = chain->index;
1892 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
1893 cls_flower.cookie = (unsigned long) tmplt;
1895 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1898 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1899 struct nlattr **tca,
1900 struct netlink_ext_ack *extack)
1902 struct fl_flow_tmplt *tmplt;
1906 if (!tca[TCA_OPTIONS])
1907 return ERR_PTR(-EINVAL);
1909 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1911 return ERR_PTR(-ENOBUFS);
1912 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1913 tca[TCA_OPTIONS], fl_policy, NULL);
1917 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1922 tmplt->chain = chain;
1923 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1927 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1929 err = fl_hw_create_tmplt(chain, tmplt);
1940 return ERR_PTR(err);
1943 static void fl_tmplt_destroy(void *tmplt_priv)
1945 struct fl_flow_tmplt *tmplt = tmplt_priv;
1947 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1951 static int fl_dump_key_val(struct sk_buff *skb,
1952 void *val, int val_type,
1953 void *mask, int mask_type, int len)
1957 if (!memchr_inv(mask, 0, len))
1959 err = nla_put(skb, val_type, len, val);
1962 if (mask_type != TCA_FLOWER_UNSPEC) {
1963 err = nla_put(skb, mask_type, len, mask);
1970 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1971 struct fl_flow_key *mask)
1973 if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1974 &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1975 sizeof(key->tp_min.dst)) ||
1976 fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1977 &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1978 sizeof(key->tp_max.dst)) ||
1979 fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1980 &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1981 sizeof(key->tp_min.src)) ||
1982 fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1983 &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1984 sizeof(key->tp_max.src)))
1990 static int fl_dump_key_mpls(struct sk_buff *skb,
1991 struct flow_dissector_key_mpls *mpls_key,
1992 struct flow_dissector_key_mpls *mpls_mask)
1996 if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1998 if (mpls_mask->mpls_ttl) {
1999 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2000 mpls_key->mpls_ttl);
2004 if (mpls_mask->mpls_tc) {
2005 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2010 if (mpls_mask->mpls_label) {
2011 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2012 mpls_key->mpls_label);
2016 if (mpls_mask->mpls_bos) {
2017 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2018 mpls_key->mpls_bos);
2025 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2026 struct flow_dissector_key_ip *key,
2027 struct flow_dissector_key_ip *mask)
2029 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2030 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2031 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2032 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2034 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2035 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2041 static int fl_dump_key_vlan(struct sk_buff *skb,
2042 int vlan_id_key, int vlan_prio_key,
2043 struct flow_dissector_key_vlan *vlan_key,
2044 struct flow_dissector_key_vlan *vlan_mask)
2048 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2050 if (vlan_mask->vlan_id) {
2051 err = nla_put_u16(skb, vlan_id_key,
2056 if (vlan_mask->vlan_priority) {
2057 err = nla_put_u8(skb, vlan_prio_key,
2058 vlan_key->vlan_priority);
2065 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2066 u32 *flower_key, u32 *flower_mask,
2067 u32 flower_flag_bit, u32 dissector_flag_bit)
2069 if (dissector_mask & dissector_flag_bit) {
2070 *flower_mask |= flower_flag_bit;
2071 if (dissector_key & dissector_flag_bit)
2072 *flower_key |= flower_flag_bit;
2076 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2082 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2088 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2089 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2090 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2091 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2092 FLOW_DIS_FIRST_FRAG);
2094 _key = cpu_to_be32(key);
2095 _mask = cpu_to_be32(mask);
2097 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2101 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2104 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2105 struct flow_dissector_key_enc_opts *enc_opts)
2107 struct geneve_opt *opt;
2108 struct nlattr *nest;
2111 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2113 goto nla_put_failure;
2115 while (enc_opts->len > opt_off) {
2116 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2118 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2120 goto nla_put_failure;
2121 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2123 goto nla_put_failure;
2124 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2125 opt->length * 4, opt->opt_data))
2126 goto nla_put_failure;
2128 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2130 nla_nest_end(skb, nest);
2134 nla_nest_cancel(skb, nest);
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

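/* Dump tunnel encapsulation options under @enc_opt_type (key or mask nest).
 * Only GENEVE options are handled here; any other destination option type
 * cancels the nest and fails the dump.
 */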
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

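/* Dump tunnel option key and mask as two separate nests. */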
static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;
	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

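/* Dump a complete flower key/mask pair into netlink attributes. This is
 * shared by filter dumps (fl_dump) and template dumps (fl_tmplt_dump);
 * masked-out fields are skipped by the individual helpers above.
 */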
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}
	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}
	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;
	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	    fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;
	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

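/* Dump one filter. tp->lock is taken while reading fields that an unlocked
 * (TCF_PROTO_OPS_DOIT_UNLOCKED) writer may update concurrently; hardware
 * stats are refreshed before the actions are dumped unless the filter was
 * created with skip_hw.
 */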
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

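/* Dump a chain template: same key dumping path as a filter, but using the
 * template's dummy key and mask.
 */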
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

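/* Update the cached class pointer of a filter bound to @classid. */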
static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

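/* Flower classifier ops. TCF_PROTO_OPS_DOIT_UNLOCKED lets the TC core invoke
 * these callbacks without holding the RTNL lock.
 */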
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

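/* Module registration. Illustrative userspace usage (exact syntax depends on
 * the iproute2 version in use):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 skip_hw action drop
 *   tc filter show dev eth0 ingress   # exercises fl_dump()/fl_dump_key()
 */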
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");