// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include <net/netfilter/nf_conntrack_act_ct.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};
/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};
/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};
enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,     /* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};
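/* Illustrative note (not from the original source): parse_nat() below always
 * sets OVS_CT_NAT, so a source-NAT action ends up storing
 * OVS_CT_NAT | OVS_CT_SRC_NAT in ovs_conntrack_info->nat, while a bare
 * ct(nat) stores OVS_CT_NAT alone, meaning "translate according to an
 * existing binding only".
 */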
/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;                 /* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;              /* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
#endif
};
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED	0
#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS	512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);
struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};
struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};
static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif
static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}
/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}
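/* Illustrative example (not from the original source): a reply packet on an
 * expected FTP data connection arrives with ctinfo == IP_CT_RELATED_REPLY;
 * ovs_ct_get_state() then reports
 * OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR | OVS_CS_F_RELATED, which userspace
 * sees as ct_state=+trk+rpl+rel.
 */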
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? READ_ONCE(ct->mark) : 0;
#else
	return 0;
#endif
}
/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif
static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}
static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}
static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}
/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action.  If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they
 * are initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}
/* This is called to initialize CT key fields possibly coming in from the local
 * stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb,
		     struct sw_flow_key *key,
		     bool post_ct)
{
	ovs_ct_update_key(skb, NULL, key, post_ct, false);
}
int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig;

			memset(&orig, 0, sizeof(orig));
			orig.ipv4_src = output->ipv4.ct_orig.src;
			orig.ipv4_dst = output->ipv4.ct_orig.dst;
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv4_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig;

			memset(&orig, 0, sizeof(orig));
			memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
			       sizeof(orig.ipv6_src));
			memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
			       sizeof(orig.ipv6_dst));
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv6_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}
static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}
static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}
/* Initialize labels for a new, yet to be committed conntrack entry.  Note that
 * since the new connection is not yet confirmed, and thus no-one else has
 * access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;   /* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				(labels->ct_labels_32[i]
				 & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}
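/* Worked example (not from the original source): with a 32-bit chunk
 * dst[i] = 0xaaaa0000, labels->ct_labels_32[i] = 0x00005555 and
 * mask->ct_labels_32[i] = 0x0000ffff, the masked write above computes
 * (0xaaaa0000 & 0xffff0000) | (0x00005555 & 0x0000ffff) = 0xaaaa5555:
 * only the bits selected by the mask are replaced.
 */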
static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
	int err;

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err) {
			if (err != -EINPROGRESS)
				kfree_skb(skb);
			return err;
		}

		key->ip.proto = ipv6_hdr(skb)->nexthdr;
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#endif
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	/* The key extracted from the fragment that completed this datagram
	 * likely didn't have an L4 header, so regenerate it.
	 */
	ovs_flow_key_update_l3l4(skb, key);

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}
static struct nf_conntrack_expect *
ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
		   u16 proto, const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net,
			       &tuple))
		return NULL;

	exp = __nf_ct_expect_find(net, zone, &tuple);
	if (exp) {
		struct nf_conntrack_tuple_hash *h;

		/* Delete existing conntrack entry, if it clashes with the
		 * expectation.  This can happen since conntrack ALGs do not
		 * check for clashes between (new) expectations and existing
		 * conntrack entries.  nf_conntrack_in() will check the
		 * expectations only if a conntrack entry can not be found,
		 * which can lead to OVS finding the expectation (here) in the
		 * init direction, but which will not be removed by the
		 * nf_conntrack_in() call, if a matching conntrack entry is
		 * found instead.  In this case all init direction packets
		 * would be reported as new related packets, while reply
		 * direction packets would be reported as un-related
		 * established packets.
		 */
		h = nf_conntrack_find_get(net, zone, &tuple);
		if (h) {
			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

			nf_ct_delete(ct, 0, 0);
			nf_ct_put(ct);
		}
	}

	return exp;
}
/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
		return IP_CT_ESTABLISHED_REPLY;
	/* Once we've had two way comms, always ESTABLISHED. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}
/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state.  This allows an
 * skb->_nfct lost due to an upcall to be recovered during actions execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection.  Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;   /* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}
static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb.  This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced.  If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_ct_put(ct);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}
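/* Illustrative example (not from the original source): if source NAT rewrites
 * 10.1.1.5:34567 to 192.0.2.1:40000, the flow key must be updated the same
 * way, because subsequent actions and recirculated lookups match on the key,
 * not on the packet headers that were just rewritten.
 */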
/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	int err, action = 0;

	if (!(info->nat & OVS_CT_NAT))
		return NF_ACCEPT;
	if (info->nat & OVS_CT_SRC_NAT)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (info->nat & OVS_CT_DST_NAT)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);

	if (action & BIT(NF_NAT_MANIP_SRC))
		ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC);
	if (action & BIT(NF_NAT_MANIP_DST))
		ovs_nat_update_key(key, skb, NF_NAT_MANIP_DST);

	return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif
/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already.  Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			nf_ct_put(ct);
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return -ENOENT;

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call.  We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		bool add_helper = false;

		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT.  We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action.  For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit) &&
		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
			return -EINVAL;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one,
		 * or attach a helper in a later commit.  Therefore, for
		 * connections which we will commit, we may need to attach
		 * the helper here.
		 */
		if (!nf_ct_is_confirmed(ct) && info->commit &&
		    info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;
			add_helper = true;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") or a
		 *   helper was just attached ("add_helper") for a confirmed
		 *   connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
					      info->commit) &&
		    nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) {
			return -EINVAL;
		}

		if (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
			/* Be liberal for tcp packets so that out-of-window
			 * packets are not marked invalid.
			 */
			nf_ct_set_tcp_be_liberal(ct);
		}

		nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	}

	return 0;
}
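/* Illustrative example (not from the original source): the two-pass pattern
 * the "cached" check above optimizes, written as OpenFlow rules:
 *
 *   table=0, ip, actions=ct(table=1)
 *   table=1, ip, ct_state=+trk+new, actions=ct(commit),NORMAL
 *   table=1, ip, ct_state=+trk+est, actions=NORMAL
 *
 * The ct(commit) in table 1 sees the skb->_nfct left by table 0's ct() and,
 * for the same zone, skips the second nf_conntrack_in() call.
 */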
/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conntrack_expect *exp;

	/* If we pass an expected packet through nf_conntrack_in() the
	 * expectation is typically removed, but the packet could still be
	 * lost in upcall processing.  To prevent this from happening we
	 * perform an explicit expectation lookup.  Expected connections are
	 * always new, and will be passed through conntrack only when they are
	 * committed, as it is OK to remove the expectation at that time.
	 */
	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
	if (exp) {
		u8 state;

		/* NOTE: New connections are NATted and Helped only when
		 * committed, so we are not calling into NAT here.
		 */
		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
		__ovs_ct_update_key(key, state, &info->zone, exp->master);
	} else {
		struct nf_conn *ct;
		int err;

		err = __ovs_ct_lookup(net, key, info, skb);
		if (err)
			return err;

		ct = (struct nf_conn *)skb_nfct(skb);
		if (ct)
			nf_ct_deliver_cached_events(ct);
	}

	return 0;
}
static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}
/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}
/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}
static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
#endif
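/* Usage note (not from the original source; the command names assume the
 * Open vSwitch userspace tools): the per-zone limits consulted above are
 * typically configured with something like
 *   ovs-appctl dpctl/ct-set-limits default=0 zone=5,limit=100
 * and inspected with "ovs-appctl dpctl/ct-get-limits zone=5".
 */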
/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given.  NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates.  Setting the event
	 * mask allows those events to be filtered.  The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;

		nf_conn_act_ct_ext_add(ct);
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
		return -EINVAL;

	return 0;
}
/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding.  This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum).  The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
	unsigned int len;
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case htons(ETH_P_IPV6):
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);
	if (err)
		kfree_skb(skb);

	return err;
}
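/* Worked example (not from the original source): a zero-payload IPv4/UDP
 * datagram has tot_len = 20 + 8 = 28, but arrives in a minimum-size 60-byte
 * Ethernet frame.  After the caller pulls the 14-byte Ethernet header,
 * skb->len is 46 while ip_hdr(skb)->tot_len is 28; the pskb_trim_rcsum()
 * above removes the 18 bytes of link-layer padding so that L4 checksum
 * verification sees only the real datagram.
 */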
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = ovs_skb_network_trim(skb);
	if (err)
		return err;

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = handle_fragments(net, key, info->zone.id, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	skb_push_rcsum(skb, nh_ofs);
	if (err)
		kfree_skb(skb);
	return err;
}
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);

	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	if (key)
		ovs_ct_fill_key(skb, key, false);

	return 0;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
		     struct ovs_conntrack_info *info, bool log)
{
	struct nlattr *a;
	int rem;
	bool have_ip_max = false;
	bool have_proto_max = false;
	bool ip_vers = (info->family == NFPROTO_IPV6);

	nla_for_each_nested(a, attr, rem) {
		static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
			[OVS_NAT_ATTR_SRC] = {0, 0},
			[OVS_NAT_ATTR_DST] = {0, 0},
			[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
			[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
			[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
		};
		int type = nla_type(a);

		if (type > OVS_NAT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
				  type, OVS_NAT_ATTR_MAX);
			return -EINVAL;
		}

		if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
			OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
				  type, nla_len(a),
				  ovs_nat_attr_lens[type][ip_vers]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NAT_ATTR_SRC:
		case OVS_NAT_ATTR_DST:
			if (info->nat) {
				OVS_NLERR(log, "Only one type of NAT may be specified");
				return -ERANGE;
			}
			info->nat |= OVS_CT_NAT;
			info->nat |= ((type == OVS_NAT_ATTR_SRC)
					? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
			break;

		case OVS_NAT_ATTR_IP_MIN:
			nla_memcpy(&info->range.min_addr, a,
				   sizeof(info->range.min_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_IP_MAX:
			have_ip_max = true;
			nla_memcpy(&info->range.max_addr, a,
				   sizeof(info->range.max_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_PROTO_MIN:
			info->range.min_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PROTO_MAX:
			have_proto_max = true;
			info->range.max_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PERSISTENT:
			info->range.flags |= NF_NAT_RANGE_PERSISTENT;
			break;

		case OVS_NAT_ATTR_PROTO_HASH:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
			break;

		case OVS_NAT_ATTR_PROTO_RANDOM:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
			break;

		default:
			OVS_NLERR(log, "Unknown nat attribute (%d)", type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
		return -EINVAL;
	}
	if (!info->nat) {
		/* Do not allow flags if no type is given. */
		if (info->range.flags) {
			OVS_NLERR(log,
				  "NAT flags may be given only when NAT range (SRC or DST) is also specified.");
			return -EINVAL;
		}
		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
	} else if (!info->commit) {
		OVS_NLERR(log,
			  "NAT attributes may be specified only when CT COMMIT flag is also specified.");
		return -EINVAL;
	}
	/* Allow missing IP_MAX. */
	if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
		memcpy(&info->range.max_addr, &info->range.min_addr,
		       sizeof(info->range.max_addr));
	}
	/* Allow missing PROTO_MAX. */
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    !have_proto_max)
		info->range.max_proto.all = info->range.min_proto.all;

	return 0;
}
#endif
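/* Illustrative example (not from the original source; the flow syntax is the
 * Open vSwitch userspace's): the nested OVS_CT_ATTR_NAT attributes parsed
 * above correspond to an action such as
 *   ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535,random))
 * which would carry OVS_NAT_ATTR_SRC, IP_MIN/IP_MAX, PROTO_MIN/PROTO_MAX and
 * OVS_NAT_ATTR_PROTO_RANDOM.
 */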
static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
	[OVS_CT_ATTR_COMMIT]	  = { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_FORCE_COMMIT] = { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_ZONE]	  = { .minlen = sizeof(u16),
				      .maxlen = sizeof(u16) },
	[OVS_CT_ATTR_MARK]	  = { .minlen = sizeof(struct md_mark),
				      .maxlen = sizeof(struct md_mark) },
	[OVS_CT_ATTR_LABELS]	  = { .minlen = sizeof(struct md_labels),
				      .maxlen = sizeof(struct md_labels) },
	[OVS_CT_ATTR_HELPER]	  = { .minlen = 1,
				      .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
	/* NAT length is checked when parsing the nested attributes. */
	[OVS_CT_ATTR_NAT]	  = { .minlen = 0, .maxlen = INT_MAX },
#endif
	[OVS_CT_ATTR_EVENTMASK]	  = { .minlen = sizeof(u32),
				      .maxlen = sizeof(u32) },
	[OVS_CT_ATTR_TIMEOUT]	  = { .minlen = 1,
				      .maxlen = CTNL_TIMEOUT_NAME_MAX },
};
static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen;
		int minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}

		maxlen = ovs_ct_attr_lens[type].maxlen;
		minlen = ovs_ct_attr_lens[type].minlen;
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log, "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_FORCE_COMMIT:
			info->force = true;
			fallthrough;
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!memchr(*helper, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
#if IS_ENABLED(CONFIG_NF_NAT)
		case OVS_CT_ATTR_NAT: {
			int err = parse_nat(a, info, log);

			if (err)
				return err;
			break;
		}
#endif
		case OVS_CT_ATTR_EVENTMASK:
			info->have_eventmask = true;
			info->eventmask = nla_get_u32(a);
			break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		case OVS_CT_ATTR_TIMEOUT:
			memcpy(info->timeout, nla_data(a), nla_len(a));
			if (!memchr(info->timeout, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack timeout");
				return -EINVAL;
			}
			break;
#endif
		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)", type);
			return -EINVAL;
		}
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (!info->commit && info->mark.mask) {
		OVS_NLERR(log, "Setting conntrack mark requires 'commit' flag.");
		return -EINVAL;
	}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	if (!info->commit && labels_nonzero(&info->labels.mask)) {
		OVS_NLERR(log, "Setting conntrack labels requires 'commit' flag.");
		return -EINVAL;
	}
#endif
	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}
bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
	if (attr == OVS_KEY_ATTR_CT_STATE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    attr == OVS_KEY_ATTR_CT_ZONE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    attr == OVS_KEY_ATTR_CT_MARK)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    attr == OVS_KEY_ATTR_CT_LABELS) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		return ovs_net->xt_label;
	}

	return false;
}
int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
		       const struct sw_flow_key *key,
		       struct sw_flow_actions **sfa, bool log)
{
	struct ovs_conntrack_info ct_info;
	const char *helper = NULL;
	u16 family;
	int err;

	family = key_to_nfproto(key);
	if (family == NFPROTO_UNSPEC) {
		OVS_NLERR(log, "ct family unspecified");
		return -EINVAL;
	}

	memset(&ct_info, 0, sizeof(ct_info));
	ct_info.family = family;

	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);

	err = parse_ct(attr, &ct_info, &helper, log);
	if (err)
		return err;

	/* Set up template for tracking connections in specific zones. */
	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
	if (!ct_info.ct) {
		OVS_NLERR(log, "Failed to allocate conntrack template");
		return -ENOMEM;
	}

	if (ct_info.timeout[0]) {
		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
				      ct_info.timeout))
			pr_info_ratelimited("Failed to associate timeout policy `%s'\n",
					    ct_info.timeout);
		else
			ct_info.nf_ct_timeout = rcu_dereference(
				nf_ct_timeout_find(ct_info.ct)->timeout);
	}

	if (helper) {
		err = nf_ct_add_helper(ct_info.ct, helper, ct_info.family,
				       key->ip.proto, ct_info.nat,
				       &ct_info.helper);
		if (err) {
			OVS_NLERR(log, "Failed to add %s helper %d", helper, err);
			goto err_free_ct;
		}
	}

	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
				 sizeof(ct_info), log);
	if (err)
		goto err_free_ct;

	if (ct_info.commit)
		__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
	return 0;
err_free_ct:
	__ovs_ct_free_action(&ct_info);
	return err;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
			       struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
	if (!start)
		return false;

	if (info->nat & OVS_CT_SRC_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
			return false;
	} else if (info->nat & OVS_CT_DST_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
			return false;
	} else {
		goto out;
	}

	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    info->family == NFPROTO_IPV4) {
			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
					    info->range.min_addr.ip) ||
			    (info->range.max_addr.ip
			     != info->range.min_addr.ip &&
			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
					      info->range.max_addr.ip))))
				return false;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   info->family == NFPROTO_IPV6) {
			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
					     &info->range.min_addr.in6) ||
			    (memcmp(&info->range.max_addr.in6,
				    &info->range.min_addr.in6,
				    sizeof(info->range.max_addr.in6)) &&
			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
					       &info->range.max_addr.in6))))
				return false;
		} else {
			return false;
		}
	}
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
			 ntohs(info->range.min_proto.all)) ||
	     (info->range.max_proto.all != info->range.min_proto.all &&
	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
			  ntohs(info->range.max_proto.all)))))
		return false;

	if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
		return false;
out:
	nla_nest_end(skb, start);

	return true;
}
#endif
int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
			  struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
	if (!start)
		return -EMSGSIZE;

	if (ct_info->commit && nla_put_flag(skb, ct_info->force
					    ? OVS_CT_ATTR_FORCE_COMMIT
					    : OVS_CT_ATTR_COMMIT))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
		    &ct_info->mark))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    labels_nonzero(&ct_info->labels.mask) &&
	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
		    &ct_info->labels))
		return -EMSGSIZE;
	if (ct_info->helper) {
		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
				   ct_info->helper->name))
			return -EMSGSIZE;
	}
	if (ct_info->have_eventmask &&
	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
		return -EMSGSIZE;
	if (ct_info->timeout[0]) {
		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
			return -EMSGSIZE;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
		return -EMSGSIZE;
#endif
	nla_nest_end(skb, start);

	return 0;
}
void ovs_ct_free_action(const struct nlattr *a)
{
	struct ovs_conntrack_info *ct_info = nla_data(a);

	__ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
	if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (ct_info->nat)
			nf_nat_helper_put(ct_info->helper);
#endif
		nf_conntrack_helper_put(ct_info->helper);
	}
	if (ct_info->ct) {
		if (ct_info->timeout[0])
			nf_ct_destroy_timeout(ct_info->ct);
		nf_ct_tmpl_free(ct_info->ct);
	}
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
	int i, err;

	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
					 GFP_KERNEL);
	if (!ovs_net->ct_limit_info)
		return -ENOMEM;

	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
	ovs_net->ct_limit_info->limits =
		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
			      GFP_KERNEL);
	if (!ovs_net->ct_limit_info->limits) {
		kfree(ovs_net->ct_limit_info);
		return -ENOMEM;
	}

	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

	ovs_net->ct_limit_info->data =
		nf_conncount_init(net, NFPROTO_INET, sizeof(u32));

	if (IS_ERR(ovs_net->ct_limit_info->data)) {
		err = PTR_ERR(ovs_net->ct_limit_info->data);
		kfree(ovs_net->ct_limit_info->limits);
		kfree(ovs_net->ct_limit_info);
		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
		return err;
	}
	return 0;
}
static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
	int i;

	nf_conncount_destroy(net, NFPROTO_INET, info->data);
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		struct hlist_head *head = &info->limits[i];
		struct ovs_ct_limit *ct_limit;

		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
					 lockdep_ovsl_is_held())
			kfree_rcu(ct_limit, rcu);
	}
	kfree(info->limits);
	kfree(info);
}
static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
			     struct ovs_header **ovs_reply_header)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *skb;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
					info->snd_seq,
					&dp_ct_limit_genl_family, 0, cmd);

	if (!*ovs_reply_header) {
		nlmsg_free(skb);
		return ERR_PTR(-EMSGSIZE);
	}
	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

	return skb;
}
static bool check_zone_id(int zone_id, u16 *pzone)
{
	if (zone_id >= 0 && zone_id <= 65535) {
		*pzone = (u16)zone_id;
		return true;
	}
	return false;
}
static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err = 0;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit),
					   GFP_KERNEL_ACCOUNT);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return err;
}
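/* Illustrative layout (not from the original source): the
 * OVS_CT_LIMIT_ATTR_ZONE_LIMIT payload walked above is a flat array of
 * struct ovs_zone_limit entries rather than nested netlink attributes, e.g.
 *   { .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE, .limit = 0 }
 *   { .zone_id = 5, .limit = 100 }
 * which sets the default to unlimited and caps zone 5 at 100 connections.
 */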
static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err = 0;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			ovs_lock();
			ct_limit_del(info, zone);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

	return err;
}
static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
					  struct sk_buff *reply)
{
	struct ovs_zone_limit zone_limit = {
		.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
		.limit   = info->default_limit,
	};

	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int __ovs_ct_limit_get_zone_limit(struct net *net,
					 struct nf_conncount_data *data,
					 u16 zone_id, u32 limit,
					 struct sk_buff *reply)
{
	struct nf_conntrack_zone ct_zone;
	struct ovs_zone_limit zone_limit;
	u32 conncount_key = zone_id;

	zone_limit.zone_id = zone_id;
	zone_limit.limit = limit;
	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
					      &ct_zone);
	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int ovs_ct_limit_get_zone_limit(struct net *net,
				       struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info,
				       struct sk_buff *reply)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err;
	u32 limit;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			err = ovs_ct_limit_get_default_limit(info, reply);
			if (err)
				return err;
		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
						   &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			rcu_read_lock();
			limit = ct_limit_get(info, zone);
			rcu_read_unlock();

			err = __ovs_ct_limit_get_zone_limit(
				net, info->data, zone, limit, reply);
			if (err)
				return err;
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

	return 0;
}
static int ovs_ct_limit_get_all_zone_limit(struct net *net,
					   struct ovs_ct_limit_info *info,
					   struct sk_buff *reply)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	int i, err = 0;

	err = ovs_ct_limit_get_default_limit(info, reply);
	if (err)
		return err;

	rcu_read_lock();
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		head = &info->limits[i];
		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
			err = __ovs_ct_limit_get_zone_limit(net, info->data,
				ct_limit->zone, ct_limit->limit, reply);
			if (err)
				goto exit_err;
		}
	}

exit_err:
	rcu_read_unlock();
	return err;
}
static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	static_branch_enable(&ovs_ct_limit_enabled);

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct nlattr *nla_reply;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct net *net = sock_net(skb->sk);
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
	if (!nla_reply) {
		err = -EMSGSIZE;
		goto exit_err;
	}

	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = ovs_ct_limit_get_zone_limit(
			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
			reply);
		if (err)
			goto exit_err;
	} else {
		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
						      reply);
		if (err)
			goto exit_err;
	}

	nla_nest_end(reply, nla_reply);
	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static const struct genl_small_ops ct_limit_genl_ops[] = {
	{ .cmd = OVS_CT_LIMIT_CMD_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					       * privilege.
					       */
		.doit = ovs_ct_limit_cmd_set,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					       * privilege.
					       */
		.doit = ovs_ct_limit_cmd_del,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,	      /* OK for unprivileged users. */
		.doit = ovs_ct_limit_cmd_get,
	},
};
static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
	.name = OVS_CT_LIMIT_MCGROUP,
};
struct genl_family dp_ct_limit_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_CT_LIMIT_FAMILY,
	.version = OVS_CT_LIMIT_VERSION,
	.maxattr = OVS_CT_LIMIT_ATTR_MAX,
	.policy = ct_limit_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = ct_limit_genl_ops,
	.n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
	.resv_start_op = OVS_CT_LIMIT_CMD_GET + 1,
	.mcgrps = &ovs_ct_limit_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};
#endif
int ovs_ct_init(struct net *net)
{
	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		ovs_net->xt_label = false;
		OVS_NLERR(true, "Failed to set connlabel length");
	} else {
		ovs_net->xt_label = true;
	}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	return ovs_ct_limit_init(net, ovs_net);
#else
	return 0;
#endif
}
void ovs_ct_exit(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	ovs_ct_limit_exit(net, ovs_net);
#endif

	if (ovs_net->xt_label)
		nf_connlabels_put(net);
}