1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3 * Patrick Schaaf <bof@bof.de>
4 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
7 /* Kernel module for IP set management */
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
13 #include <linux/skbuff.h>
14 #include <linux/spinlock.h>
15 #include <linux/rculist.h>
16 #include <net/netlink.h>
17 #include <net/net_namespace.h>
18 #include <net/netns/generic.h>
20 #include <linux/netfilter.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter/nfnetlink.h>
23 #include <linux/netfilter/ipset/ip_set.h>
25 static LIST_HEAD(ip_set_type_list); /* all registered set types */
26 static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
27 static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
/* NOTE(review): the four members below look like the fields of a per-netns
 * struct (used via ip_set_pernet()/net_generic() below) whose opening
 * "struct ... {" line is not visible in this chunk — confirm against the
 * complete file.
 */
30 struct ip_set * __rcu *ip_set_list; /* all individual sets */
31 ip_set_id_t ip_set_max; /* max number of sets */
32 bool is_deleted; /* deleted by ip_set_net_exit */
33 bool is_destroyed; /* all sets are destroyed */
/* pernet subsystem id, used as the key for net_generic() lookups */
36 static unsigned int ip_set_net_id __read_mostly;
/* Return this module's per-network-namespace data for @net. */
38 static struct ip_set_net *ip_set_pernet(struct net *net)
40 return net_generic(net, ip_set_net_id);
/* Bounded set-name equality test; set names fit in IPSET_MAXNAMELEN. */
44 #define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
/* Upper bound on the number of sets; 0 selects the built-in default. */
46 static unsigned int max_sets;
48 module_param(max_sets, int, 0600);
49 MODULE_PARM_DESC(max_sets, "maximal number of sets");
50 MODULE_LICENSE("GPL");
51 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
52 MODULE_DESCRIPTION("core IP set support");
53 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
55 /* When the nfnl mutex or ip_set_ref_lock is held: */
56 #define ip_set_dereference(p) \
57 rcu_dereference_protected(p, \
58 lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
59 lockdep_is_held(&ip_set_ref_lock))
/* Slot @id of the per-netns set array, checked against the locks above. */
60 #define ip_set(inst, id) \
61 ip_set_dereference((inst)->ip_set_list)[id]
/* Raw, lockdep-unchecked access; for the netlink reference paths. */
62 #define ip_set_ref_netlink(inst,id) \
63 rcu_dereference_raw((inst)->ip_set_list)[id]
65 /* The set types are implemented in modules and registered set types
66 * can be found in ip_set_type_list. Adding/deleting types is
67 * serialized by ip_set_type_mutex.
/* Serialize additions/removals on ip_set_type_list. */
71 ip_set_type_lock(void)
73 mutex_lock(&ip_set_type_mutex);
/* Release the type-list mutex taken by ip_set_type_lock(). */
77 ip_set_type_unlock(void)
79 mutex_unlock(&ip_set_type_mutex);
82 /* Register and deregister settype */
84 static struct ip_set_type *
85 find_set_type(const char *name, u8 family, u8 revision)
87 struct ip_set_type *type;
/* A registered type matches when the name is equal, the family is the
 * requested one (or the type is family-agnostic, NFPROTO_UNSPEC) and
 * @revision lies inside the type's supported [min, max] window.
 * (The return statements are not visible in this chunk.)
 */
89 list_for_each_entry_rcu(type, &ip_set_type_list, list)
90 if (STRNCMP(type->name, name) &&
91 (type->family == family ||
92 type->family == NFPROTO_UNSPEC) &&
93 revision >= type->revision_min &&
94 revision <= type->revision_max)
99 /* Unlock, try to load a set type module and lock again */
101 load_settype(const char *name)
/* The nfnl mutex must be dropped around request_module(): module init
 * may itself take the mutex (type registration).  It is re-taken on
 * both the failure and success paths before returning.
 */
103 nfnl_unlock(NFNL_SUBSYS_IPSET);
104 pr_debug("try to load ip_set_%s\n", name);
105 if (request_module("ip_set_%s", name) < 0) {
106 pr_warn("Can't find ip_set type %s\n", name);
107 nfnl_lock(NFNL_SUBSYS_IPSET);
110 nfnl_lock(NFNL_SUBSYS_IPSET);
114 /* Find a set type and reference it */
115 #define find_set_type_get(name, family, revision, found) \
116 __find_set_type_get(name, family, revision, found, false)
119 __find_set_type_get(const char *name, u8 family, u8 revision,
120 struct ip_set_type **found, bool retry)
122 struct ip_set_type *type;
/* On the retry pass, first try to auto-load the type module. */
125 if (retry && !load_settype(name))
126 return -IPSET_ERR_FIND_TYPE;
129 *found = find_set_type(name, family, revision);
/* Pin the owning module so the type can't be unloaded while in use. */
131 err = !try_module_get((*found)->me) ? -EFAULT : 0;
134 /* Make sure the type is already loaded
135 * but we don't support the revision
137 list_for_each_entry_rcu(type, &ip_set_type_list, list)
138 if (STRNCMP(type->name, name)) {
139 err = -IPSET_ERR_FIND_TYPE;
/* Nothing matched: fail if this already was the retry pass, otherwise
 * recurse exactly once with retry=true to trigger a module load.
 */
144 return retry ? -IPSET_ERR_FIND_TYPE :
145 __find_set_type_get(name, family, revision, found, true);
152 /* Find a given set type by name and family.
153 * If we succeeded, the supported minimal and maximum revisions are
156 #define find_set_type_minmax(name, family, min, max) \
157 __find_set_type_minmax(name, family, min, max, false)
160 __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
163 struct ip_set_type *type;
/* On the retry pass, first try to auto-load the type module. */
166 if (retry && !load_settype(name))
167 return -IPSET_ERR_FIND_TYPE;
/* Start with an empty (inverted) range; widened by matches below. */
169 *min = 255; *max = 0;
171 list_for_each_entry_rcu(type, &ip_set_type_list, list)
172 if (STRNCMP(type->name, name) &&
173 (type->family == family ||
174 type->family == NFPROTO_UNSPEC)) {
176 if (type->revision_min < *min)
177 *min = type->revision_min;
178 if (type->revision_max > *max)
179 *max = type->revision_max;
/* No match: fail on the retry pass, else recurse once with retry=true. */
185 return retry ? -IPSET_ERR_FIND_TYPE :
186 __find_set_type_minmax(name, family, min, max, true);
/* Human-readable protocol family name for log/debug messages. */
189 #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
190 (f) == NFPROTO_IPV6 ? "inet6" : "any")
192 /* Register a set type structure. The type is identified by
193 * the unique triple of name, family and revision.
196 ip_set_type_register(struct ip_set_type *type)
/* Reject types built against a different netlink protocol version. */
200 if (type->protocol != IPSET_PROTOCOL) {
201 pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
202 type->name, family_name(type->family),
203 type->revision_min, type->revision_max,
204 type->protocol, IPSET_PROTOCOL);
/* Duplicate (name, family, revision_min) registration is an error. */
209 if (find_set_type(type->name, type->family, type->revision_min)) {
211 pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
212 type->name, family_name(type->family),
214 ip_set_type_unlock();
/* RCU insertion pairs with the list_for_each_entry_rcu() readers. */
217 list_add_rcu(&type->list, &ip_set_type_list);
218 pr_debug("type %s, family %s, revision %u:%u registered.\n",
219 type->name, family_name(type->family),
220 type->revision_min, type->revision_max);
221 ip_set_type_unlock();
225 EXPORT_SYMBOL_GPL(ip_set_type_register);
227 /* Unregister a set type. There's a small race with ip_set_create */
229 ip_set_type_unregister(struct ip_set_type *type)
/* Unregistering a type that was never registered is a caller bug;
 * warn and bail out without touching the list.
 */
232 if (!find_set_type(type->name, type->family, type->revision_min)) {
233 pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
234 type->name, family_name(type->family),
236 ip_set_type_unlock();
239 list_del_rcu(&type->list);
240 pr_debug("type %s, family %s with revision min %u unregistered.\n",
241 type->name, family_name(type->family), type->revision_min);
242 ip_set_type_unlock();
246 EXPORT_SYMBOL_GPL(ip_set_type_unregister);
248 /* Utility functions */
/* Allocate zeroed storage for set data: kmalloc for sizes below
 * KMALLOC_MAX_SIZE (warning suppressed so we can fall back quietly),
 * vmalloc otherwise or when kmalloc fails.
 */
250 ip_set_alloc(size_t size)
252 void *members = NULL;
254 if (size < KMALLOC_MAX_SIZE)
255 members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
258 pr_debug("%p: allocated with kmalloc\n", members);
262 members = vzalloc(size);
265 pr_debug("%p: allocated with vmalloc\n", members);
269 EXPORT_SYMBOL_GPL(ip_set_alloc);
/* Free storage obtained from ip_set_alloc(); the debug message reports
 * which allocator path (vmalloc vs kmalloc) the address came from.
 * (The actual free call is not visible in this chunk.)
 */
272 ip_set_free(void *members)
274 pr_debug("%p: free with %s\n", members,
275 is_vmalloc_addr(members) ? "vfree" : "kfree");
278 EXPORT_SYMBOL_GPL(ip_set_free);
/* True when the netlink attribute carries the NLA_F_NESTED flag. */
281 flag_nested(const struct nlattr *nla)
283 return nla->nla_type & NLA_F_NESTED;
/* Validation policy for the nested IP-address attribute: IPv4 as a
 * 32-bit word, IPv6 as a 16-byte binary blob.
 */
286 static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
287 [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
288 [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
289 .len = sizeof(struct in6_addr) },
/* Parse a nested attribute holding an IPv4 address into @ipaddr
 * (network byte order).  Any malformed input maps to
 * -IPSET_ERR_PROTOCOL.
 */
293 ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
295 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
297 if (unlikely(!flag_nested(nla)))
298 return -IPSET_ERR_PROTOCOL;
299 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
300 ipaddr_policy, NULL))
301 return -IPSET_ERR_PROTOCOL;
/* The address attribute must be present and flagged network-order. */
302 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
303 return -IPSET_ERR_PROTOCOL;
305 *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
308 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
/* Parse a nested attribute holding an IPv6 address into @ipaddr.
 * Mirrors ip_set_get_ipaddr4(); malformed input maps to
 * -IPSET_ERR_PROTOCOL.
 */
311 ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
313 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
315 if (unlikely(!flag_nested(nla)))
316 return -IPSET_ERR_PROTOCOL;
318 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
319 ipaddr_policy, NULL))
320 return -IPSET_ERR_PROTOCOL;
321 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
322 return -IPSET_ERR_PROTOCOL;
/* Policy above caps the payload at sizeof(struct in6_addr). */
324 memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
325 sizeof(struct in6_addr));
328 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
/* Convert a stored timeout (jiffies deadline) to remaining whole
 * seconds for userspace.
 */
331 ip_set_timeout_get(const unsigned long *timeout)
/* IPSET_ELEM_PERMANENT means "no timeout" — reported separately. */
335 if (*timeout == IPSET_ELEM_PERMANENT)
338 t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
339 /* Zero value in userspace means no timeout */
340 return t == 0 ? 1 : t;
/* Fetch the user-supplied comment string from the netlink attribute.
 * (Function body is not visible in this chunk.)
 */
344 ip_set_comment_uget(struct nlattr *tb)
349 /* Called from uadd only, protected by the set spinlock.
350 * The kadt functions don't use the comment extensions in any way.
353 ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
354 const struct ip_set_ext *ext)
356 struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
357 size_t len = ext->comment ? strlen(ext->comment) : 0;
/* Release any previous comment and undo its ext_size accounting. */
360 set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
362 rcu_assign_pointer(comment->c, NULL);
/* Silently truncate over-long comments to the supported maximum. */
366 if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
367 len = IPSET_MAX_COMMENT_SIZE;
/* GFP_ATOMIC: we are under the set spinlock here. */
368 c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
371 strlcpy(c->str, ext->comment, len + 1);
372 set->ext_size += sizeof(*c) + strlen(c->str) + 1;
/* Publish the new comment for RCU readers (dump path). */
373 rcu_assign_pointer(comment->c, c);
375 EXPORT_SYMBOL_GPL(ip_set_init_comment);
377 /* Used only when dumping a set, protected by rcu_read_lock() */
379 ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
381 struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
/* Emit the comment string as a netlink attribute. */
385 return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
388 /* Called from uadd/udel, flush or the garbage collectors protected
389 * by the set spinlock.
390 * Called when the set is destroyed and when there can't be any user
391 * of the set data anymore.
394 ip_set_comment_free(struct ip_set *set, void *ptr)
396 struct ip_set_comment *comment = ptr;
397 struct ip_set_comment_rcu *c;
/* Caller guarantees exclusion, hence the unconditional (cond=1)
 * rcu_dereference_protected().
 */
399 c = rcu_dereference_protected(comment->c, 1);
/* Undo the ext_size accounting done in ip_set_init_comment(). */
402 set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
404 rcu_assign_pointer(comment->c, NULL);
407 typedef void (*destroyer)(struct ip_set *, void *);
408 /* ipset data extension types, in size order */
/* Each entry describes one optional per-element extension: the feature
 * bit (.type), the userspace create flag that enables it (.flag, 0 when
 * presence of IPSET_ATTR_TIMEOUT enables it instead), its storage size
 * and alignment, and an optional destructor for heap-backed data.
 */
410 const struct ip_set_ext_type ip_set_extensions[] = {
411 [IPSET_EXT_ID_COUNTER] = {
412 .type = IPSET_EXT_COUNTER,
413 .flag = IPSET_FLAG_WITH_COUNTERS,
414 .len = sizeof(struct ip_set_counter),
415 .align = __alignof__(struct ip_set_counter),
417 [IPSET_EXT_ID_TIMEOUT] = {
418 .type = IPSET_EXT_TIMEOUT,
419 .len = sizeof(unsigned long),
420 .align = __alignof__(unsigned long),
422 [IPSET_EXT_ID_SKBINFO] = {
423 .type = IPSET_EXT_SKBINFO,
424 .flag = IPSET_FLAG_WITH_SKBINFO,
425 .len = sizeof(struct ip_set_skbinfo),
426 .align = __alignof__(struct ip_set_skbinfo),
428 [IPSET_EXT_ID_COMMENT] = {
429 .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
430 .flag = IPSET_FLAG_WITH_COMMENT,
431 .len = sizeof(struct ip_set_comment),
/* Comments own heap memory, so they need an explicit destructor. */
432 .align = __alignof__(struct ip_set_comment),
433 .destroy = ip_set_comment_free,
436 EXPORT_SYMBOL_GPL(ip_set_extensions);
/* Should extension @id be compiled into the element layout?  Flagged
 * extensions are enabled by their create flag; the flag-less one
 * (timeout) by the presence of IPSET_ATTR_TIMEOUT.
 */
439 add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
441 return ip_set_extensions[id].flag ?
442 (flags & ip_set_extensions[id].flag) :
443 !!tb[IPSET_ATTR_TIMEOUT];
/* Compute the full element length: the type-specific part (@len) plus
 * every requested extension, each placed at its natural alignment.
 * Also records per-extension offsets and the enabled-extension mask
 * on @set.
 */
447 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
450 enum ip_set_ext_id id;
453 if (tb[IPSET_ATTR_CADT_FLAGS])
454 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
455 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
456 set->flags |= IPSET_CREATE_FLAG_FORCEADD;
/* Extensions are laid out in table (size) order after the element. */
459 for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
460 if (!add_extension(id, cadt_flags, tb))
462 len = ALIGN(len, ip_set_extensions[id].align);
463 set->offset[id] = len;
464 set->extensions |= ip_set_extensions[id].type;
465 len += ip_set_extensions[id].len;
467 return ALIGN(len, align);
469 EXPORT_SYMBOL_GPL(ip_set_elem_len);
/* Parse the extension-related netlink attributes of an add/del/test
 * request into @ext.  Each attribute is rejected unless the set was
 * created with the matching extension (timeout/counter/comment/skbinfo).
 */
472 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
473 struct ip_set_ext *ext)
/* All numeric extension attributes must be in network byte order. */
477 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
478 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
479 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
480 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
481 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
482 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
483 return -IPSET_ERR_PROTOCOL;
485 if (tb[IPSET_ATTR_TIMEOUT]) {
486 if (!SET_WITH_TIMEOUT(set))
487 return -IPSET_ERR_TIMEOUT;
488 ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
490 if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
491 if (!SET_WITH_COUNTER(set))
492 return -IPSET_ERR_COUNTER;
493 if (tb[IPSET_ATTR_BYTES])
494 ext->bytes = be64_to_cpu(nla_get_be64(
495 tb[IPSET_ATTR_BYTES]));
496 if (tb[IPSET_ATTR_PACKETS])
497 ext->packets = be64_to_cpu(nla_get_be64(
498 tb[IPSET_ATTR_PACKETS]));
500 if (tb[IPSET_ATTR_COMMENT]) {
501 if (!SET_WITH_COMMENT(set))
502 return -IPSET_ERR_COMMENT;
503 ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
505 if (tb[IPSET_ATTR_SKBMARK]) {
506 if (!SET_WITH_SKBINFO(set))
507 return -IPSET_ERR_SKBINFO;
/* SKBMARK packs mark (high 32 bits) and mask (low 32 bits) in one u64. */
508 fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
509 ext->skbinfo.skbmark = fullmark >> 32;
510 ext->skbinfo.skbmarkmask = fullmark & 0xffffffff;
512 if (tb[IPSET_ATTR_SKBPRIO]) {
513 if (!SET_WITH_SKBINFO(set))
514 return -IPSET_ERR_SKBINFO;
515 ext->skbinfo.skbprio =
516 be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
518 if (tb[IPSET_ATTR_SKBQUEUE]) {
519 if (!SET_WITH_SKBINFO(set))
520 return -IPSET_ERR_SKBINFO;
521 ext->skbinfo.skbqueue =
522 be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
526 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
/* Atomic snapshot of the byte counter. */
529 ip_set_get_bytes(const struct ip_set_counter *counter)
531 return (u64)atomic64_read(&(counter)->bytes);
/* Atomic snapshot of the packet counter. */
535 ip_set_get_packets(const struct ip_set_counter *counter)
537 return (u64)atomic64_read(&(counter)->packets);
/* Emit both counters as network-order netlink attributes. */
541 ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
543 return nla_put_net64(skb, IPSET_ATTR_BYTES,
544 cpu_to_be64(ip_set_get_bytes(counter)),
546 nla_put_net64(skb, IPSET_ATTR_PACKETS,
547 cpu_to_be64(ip_set_get_packets(counter)),
/* Emit the skbinfo extension (mark/mask, priority, queue) to netlink;
 * returns nonzero on nla_put failure.
 */
552 ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
554 /* Send nonzero parameters only */
555 return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
/* Mark and mask travel packed into a single be64 (mark in high bits). */
556 nla_put_net64(skb, IPSET_ATTR_SKBMARK,
557 cpu_to_be64((u64)skbinfo->skbmark << 32 |
558 skbinfo->skbmarkmask),
561 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
562 cpu_to_be32(skbinfo->skbprio))) ||
563 (skbinfo->skbqueue &&
564 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
565 cpu_to_be16(skbinfo->skbqueue)));
/* Dump every extension attached to element @e into @skb.  @active
 * selects whether the remaining timeout or a raw value is reported.
 */
569 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
570 const void *e, bool active)
572 if (SET_WITH_TIMEOUT(set)) {
573 unsigned long *timeout = ext_timeout(e, set);
575 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
576 htonl(active ? ip_set_timeout_get(timeout)
/* Each helper returns nonzero on netlink buffer overflow. */
580 if (SET_WITH_COUNTER(set) &&
581 ip_set_put_counter(skb, ext_counter(e, set)))
583 if (SET_WITH_COMMENT(set) &&
584 ip_set_put_comment(skb, ext_comment(e, set)))
586 if (SET_WITH_SKBINFO(set) &&
587 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
591 EXPORT_SYMBOL_GPL(ip_set_put_extensions);
/* Compare a live counter value against the user-requested @match
 * according to operator @op (none/eq/ne/lt/gt).
 */
594 ip_set_match_counter(u64 counter, u64 match, u8 op)
597 case IPSET_COUNTER_NONE:
599 case IPSET_COUNTER_EQ:
600 return counter == match;
601 case IPSET_COUNTER_NE:
602 return counter != match;
603 case IPSET_COUNTER_LT:
604 return counter < match;
605 case IPSET_COUNTER_GT:
606 return counter > match;
/* Atomically accumulate bytes into the element counter. */
612 ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
614 atomic64_add((long long)bytes, &(counter)->bytes);
/* Atomically accumulate packets into the element counter. */
618 ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
620 atomic64_add((long long)packets, &(counter)->packets);
/* Add the extension's byte/packet deltas to the element counter,
 * unless the caller asked to skip updates or packets is the
 * ULLONG_MAX sentinel.
 */
624 ip_set_update_counter(struct ip_set_counter *counter,
625 const struct ip_set_ext *ext, u32 flags)
627 if (ext->packets != ULLONG_MAX &&
628 !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
629 ip_set_add_bytes(ext->bytes, counter);
630 ip_set_add_packets(ext->packets, counter);
/* Copy the element's skbinfo into the match-extension result. */
635 ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
636 const struct ip_set_ext *ext,
637 struct ip_set_ext *mext, u32 flags)
639 mext->skbinfo = *skbinfo;
/* Evaluate the extensions of a matched element: reject expired
 * entries, apply counter-matching/updating and copy out skbinfo.
 */
643 ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
644 struct ip_set_ext *mext, u32 flags, void *data)
646 if (SET_WITH_TIMEOUT(set) &&
647 ip_set_timeout_expired(ext_timeout(data, set)))
649 if (SET_WITH_COUNTER(set)) {
650 struct ip_set_counter *counter = ext_counter(data, set);
/* With MATCH_COUNTERS, both the packet and byte predicates must hold. */
652 if (flags & IPSET_FLAG_MATCH_COUNTERS &&
653 !(ip_set_match_counter(ip_set_get_packets(counter),
654 mext->packets, mext->packets_op) &&
655 ip_set_match_counter(ip_set_get_bytes(counter),
656 mext->bytes, mext->bytes_op)))
/* Counters are updated only for entries that really matched. */
658 ip_set_update_counter(counter, ext, flags);
660 if (SET_WITH_SKBINFO(set))
661 ip_set_get_skbinfo(ext_skbinfo(data, set),
665 EXPORT_SYMBOL_GPL(ip_set_match_extensions);
667 /* Creating/destroying/renaming/swapping affect the existence and
668 * the properties of a set. All of these can be executed from userspace
669 * only and serialized by the nfnl mutex indirectly from nfnetlink.
671 * Sets are identified by their index in ip_set_list and the index
672 * is used by the external references (set/SET netfilter modules).
674 * The set behind an index may change by swapping only, from userspace.
/* Take a kernel-side reference on @set under ip_set_ref_lock. */
678 __ip_set_get(struct ip_set *set)
680 write_lock_bh(&ip_set_ref_lock);
682 write_unlock_bh(&ip_set_ref_lock);
/* Drop a kernel-side reference; underflow indicates a refcount bug. */
686 __ip_set_put(struct ip_set *set)
688 write_lock_bh(&ip_set_ref_lock);
689 BUG_ON(set->ref == 0);
691 write_unlock_bh(&ip_set_ref_lock);
694 /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
695 * a separate reference counter
698 __ip_set_put_netlink(struct ip_set *set)
700 write_lock_bh(&ip_set_ref_lock);
/* Underflow means a netlink get/put imbalance. */
701 BUG_ON(set->ref_netlink == 0);
703 write_unlock_bh(&ip_set_ref_lock);
706 /* Add, del and test set entries from kernel.
708 * The set behind the index must exist and must be referenced
709 * so it can't be destroyed (or changed) under our foot.
/* RCU-dereference the set stored at @index in @net's set array.
 * Caller must hold rcu_read_lock (or equivalent) and guarantee the
 * index is referenced so the set cannot be destroyed underneath.
 */
712 static struct ip_set *
713 ip_set_rcu_get(struct net *net, ip_set_id_t index)
716 struct ip_set_net *inst = ip_set_pernet(net);
719 /* ip_set_list itself needs to be protected */
720 set = rcu_dereference(inst->ip_set_list)[index];
/* Kernel-side TEST on set @index for @skb.  Returns > 0 on match,
 * 0 on no-match (errors are folded into no-match at the end).
 */
727 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
728 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
730 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
734 pr_debug("set %s, index %u\n", set->name, index);
/* The packet must carry enough dimensions and a compatible family. */
736 if (opt->dim < set->type->dimension ||
737 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
741 ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
742 rcu_read_unlock_bh();
744 if (ret == -EAGAIN) {
745 /* Type requests element to be completed */
746 pr_debug("element must be completed, ADD is triggered\n");
747 spin_lock_bh(&set->lock);
748 set->variant->kadt(set, skb, par, IPSET_ADD, opt);
749 spin_unlock_bh(&set->lock);
752 /* --return-nomatch: invert matched element */
753 if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
754 (set->type->features & IPSET_TYPE_NOMATCH) &&
755 (ret > 0 || ret == -ENOTEMPTY))
759 /* Convert error codes to nomatch */
760 return (ret < 0 ? 0 : ret);
762 EXPORT_SYMBOL_GPL(ip_set_test);
/* Kernel-side ADD of the packet-derived element to set @index.
 * The type-specific kadt handler runs under the set spinlock.
 */
765 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
766 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
768 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
772 pr_debug("set %s, index %u\n", set->name, index);
774 if (opt->dim < set->type->dimension ||
775 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
776 return -IPSET_ERR_TYPE_MISMATCH;
778 spin_lock_bh(&set->lock);
779 ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
780 spin_unlock_bh(&set->lock);
784 EXPORT_SYMBOL_GPL(ip_set_add);
/* Kernel-side DEL of the packet-derived element from set @index.
 * Mirrors ip_set_add() with the IPSET_DEL operation.
 */
787 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
788 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
790 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
794 pr_debug("set %s, index %u\n", set->name, index);
796 if (opt->dim < set->type->dimension ||
797 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
798 return -IPSET_ERR_TYPE_MISMATCH;
800 spin_lock_bh(&set->lock);
801 ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
802 spin_unlock_bh(&set->lock);
806 EXPORT_SYMBOL_GPL(ip_set_del);
808 /* Find set by name, reference it once. The reference makes sure the
809 * thing pointed to, does not go away under our feet.
/* Look up a set by name in @net, take a reference on it, and report
 * both the set pointer (via @set) and its index.
 */
813 ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
815 ip_set_id_t i, index = IPSET_INVALID_ID;
817 struct ip_set_net *inst = ip_set_pernet(net);
/* Linear scan of the set array; names are unique, first hit wins. */
820 for (i = 0; i < inst->ip_set_max; i++) {
821 s = rcu_dereference(inst->ip_set_list)[i];
822 if (s && STRNCMP(s->name, name)) {
833 EXPORT_SYMBOL_GPL(ip_set_get_byname);
835 /* If the given set pointer points to a valid set, decrement
836 * reference count by 1. The caller shall not assume the index
837 * to be valid, after calling this function.
/* Drop one reference on the set at @index, if it is still valid. */
842 __ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
847 set = rcu_dereference(inst->ip_set_list)[index];
/* Public wrapper resolving the per-netns instance from @net. */
854 ip_set_put_byindex(struct net *net, ip_set_id_t index)
856 struct ip_set_net *inst = ip_set_pernet(net);
858 __ip_set_put_byindex(inst, index);
860 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
862 /* Get the name of a set behind a set index.
863 * Set itself is protected by RCU, but its name isn't: to protect against
864 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
/* Copy the name of the set at @index into @name (IPSET_MAXNAMELEN).
 * ip_set_ref_lock (reader side) guards against a concurrent rename
 * writing the name while we copy it.
 */
868 ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
870 struct ip_set *set = ip_set_rcu_get(net, index);
874 read_lock_bh(&ip_set_ref_lock);
875 strncpy(name, set->name, IPSET_MAXNAMELEN);
876 read_unlock_bh(&ip_set_ref_lock);
878 EXPORT_SYMBOL_GPL(ip_set_name_byindex);
880 /* Routines to call by external subsystems, which do not
881 * call nfnl_lock for us.
884 /* Find set by index, reference it once. The reference makes sure the
885 * thing pointed to, does not go away under our feet.
887 * The nfnl mutex is used in the function.
/* Reference the set at @index on behalf of an external subsystem.
 * Takes the nfnl mutex itself; returns IPSET_INVALID_ID when the
 * index is out of range or empty.
 */
890 ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
893 struct ip_set_net *inst = ip_set_pernet(net);
895 if (index >= inst->ip_set_max)
896 return IPSET_INVALID_ID;
898 nfnl_lock(NFNL_SUBSYS_IPSET);
899 set = ip_set(inst, index);
903 index = IPSET_INVALID_ID;
904 nfnl_unlock(NFNL_SUBSYS_IPSET);
908 EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
910 /* If the given set pointer points to a valid set, decrement
911 * reference count by 1. The caller shall not assume the index
912 * to be valid, after calling this function.
914 * The nfnl mutex is used in the function.
/* Drop the reference taken by ip_set_nfnl_get_byindex().  Skipped when
 * the per-netns instance was already torn down by ip_set_net_exit().
 */
917 ip_set_nfnl_put(struct net *net, ip_set_id_t index)
920 struct ip_set_net *inst = ip_set_pernet(net);
922 nfnl_lock(NFNL_SUBSYS_IPSET);
923 if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
924 set = ip_set(inst, index);
928 nfnl_unlock(NFNL_SUBSYS_IPSET);
930 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
932 /* Communication protocol with userspace over netlink.
934 * The commands are serialized by the nfnl mutex.
/* Extract the protocol version attribute from a request. */
937 static inline u8 protocol(const struct nlattr * const tb[])
939 return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
/* Strict check: protocol attribute must be present and exact. */
943 protocol_failed(const struct nlattr * const tb[])
945 return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
/* Relaxed check: accept any protocol >= the supported minimum. */
949 protocol_min_failed(const struct nlattr * const tb[])
951 return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
/* Map the absence of NLM_F_EXCL to the IPSET_FLAG_EXIST semantic
 * ("ignore already-exists errors").
 */
955 flag_exist(const struct nlmsghdr *nlh)
957 return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
/* Begin an nfnetlink reply message: allocate the nlmsghdr for command
 * @cmd and fill in the standard nfgenmsg header.
 */
960 static struct nlmsghdr *
961 start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
964 struct nlmsghdr *nlh;
965 struct nfgenmsg *nfmsg;
967 nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
968 sizeof(*nfmsg), flags);
972 nfmsg = nlmsg_data(nlh);
973 nfmsg->nfgen_family = NFPROTO_IPV4;
974 nfmsg->version = NFNETLINK_V0;
/* Attribute policy for the CREATE command; names are NUL-terminated
 * strings bounded by IPSET_MAXNAMELEN, type data is a nested blob
 * validated later against the type's own create_policy.
 */
982 static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
983 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
984 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
985 .len = IPSET_MAXNAMELEN - 1 },
986 [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
987 .len = IPSET_MAXNAMELEN - 1},
988 [IPSET_ATTR_REVISION] = { .type = NLA_U8 },
989 [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
990 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
/* Find a set by name; on success *id holds its index, otherwise
 * *id is IPSET_INVALID_ID and NULL is returned.
 */
993 static struct ip_set *
994 find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
996 struct ip_set *set = NULL;
999 *id = IPSET_INVALID_ID;
1000 for (i = 0; i < inst->ip_set_max; i++) {
1001 set = ip_set(inst, i);
1002 if (set && STRNCMP(set->name, name)) {
1007 return (*id == IPSET_INVALID_ID ? NULL : set);
/* Convenience wrapper when the caller only needs the pointer. */
1010 static inline struct ip_set *
1011 find_set(struct ip_set_net *inst, const char *name)
1015 return find_set_and_id(inst, name, &id);
/* Find the first free slot in the set array for a new set named @name.
 * Also detects a clash with an existing set of the same name (reported
 * via *set, visible as -EEXIST handling at the caller).
 */
1019 find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
1020 struct ip_set **set)
1025 *index = IPSET_INVALID_ID;
1026 for (i = 0; i < inst->ip_set_max; i++) {
1027 s = ip_set(inst, i);
/* Remember the first empty slot, but keep scanning for a name clash. */
1029 if (*index == IPSET_INVALID_ID)
1031 } else if (STRNCMP(name, s->name)) {
1037 if (*index == IPSET_INVALID_ID)
1038 /* No free slot remained */
1039 return -IPSET_ERR_MAX_SETS;
/* Handler for the IPSET_CMD_NONE placeholder command (no-op). */
1043 static int ip_set_none(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1044 const struct nlmsghdr *nlh,
1045 const struct nlattr * const attr[],
1046 struct netlink_ext_ack *extack)
/* Netlink CREATE handler: allocate a new set, bind it to its type,
 * run the type-specific create, then publish it in the first free
 * slot of the per-netns array (growing the array when it is full).
 */
1051 static int ip_set_create(struct net *net, struct sock *ctnl,
1052 struct sk_buff *skb, const struct nlmsghdr *nlh,
1053 const struct nlattr * const attr[],
1054 struct netlink_ext_ack *extack)
1056 struct ip_set_net *inst = ip_set_pernet(net);
1057 struct ip_set *set, *clash = NULL;
1058 ip_set_id_t index = IPSET_INVALID_ID;
1059 struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
1060 const char *name, *typename;
1061 u8 family, revision;
1062 u32 flags = flag_exist(nlh);
/* Mandatory attributes: setname, typename, revision and family. */
1065 if (unlikely(protocol_min_failed(attr) ||
1066 !attr[IPSET_ATTR_SETNAME] ||
1067 !attr[IPSET_ATTR_TYPENAME] ||
1068 !attr[IPSET_ATTR_REVISION] ||
1069 !attr[IPSET_ATTR_FAMILY] ||
1070 (attr[IPSET_ATTR_DATA] &&
1071 !flag_nested(attr[IPSET_ATTR_DATA]))))
1072 return -IPSET_ERR_PROTOCOL;
1074 name = nla_data(attr[IPSET_ATTR_SETNAME]);
1075 typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
1076 family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
1077 revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
1078 pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
1079 name, typename, family_name(family), revision);
1081 /* First, and without any locks, allocate and initialize
1082 * a normal base set structure.
1084 set = kzalloc(sizeof(*set), GFP_KERNEL);
1087 spin_lock_init(&set->lock);
1088 strlcpy(set->name, name, IPSET_MAXNAMELEN);
1089 set->family = family;
1090 set->revision = revision;
1092 /* Next, check that we know the type, and take
1093 * a reference on the type, to make sure it stays available
1094 * while constructing our new set.
1096 * After referencing the type, we try to create the type
1097 * specific part of the set without holding any locks.
1099 ret = find_set_type_get(typename, family, revision, &set->type);
1103 /* Without holding any locks, create private part. */
1104 if (attr[IPSET_ATTR_DATA] &&
1105 nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
1106 set->type->create_policy, NULL)) {
1107 ret = -IPSET_ERR_PROTOCOL;
1111 ret = set->type->create(net, set, tb, flags);
1115 /* BTW, ret==0 here. */
1117 /* Here, we have a valid, constructed set and we are protected
1118 * by the nfnl mutex. Find the first free index in ip_set_list
1119 * and check clashing.
1121 ret = find_free_id(inst, set->name, &index, &clash);
1122 if (ret == -EEXIST) {
1123 /* If this is the same set and requested, ignore error */
1124 if ((flags & IPSET_FLAG_EXIST) &&
1125 STRNCMP(set->type->name, clash->type->name) &&
1126 set->type->family == clash->type->family &&
1127 set->type->revision_min == clash->type->revision_min &&
1128 set->type->revision_max == clash->type->revision_max &&
1129 set->variant->same_set(set, clash))
1132 } else if (ret == -IPSET_ERR_MAX_SETS) {
/* Array full: grow it by IP_SET_INC slots, guarding against
 * ip_set_id_t overflow, and swap in the enlarged copy under RCU.
 */
1133 struct ip_set **list, **tmp;
1134 ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
1136 if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
1140 list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
1143 /* nfnl mutex is held, both lists are valid */
1144 tmp = ip_set_dereference(inst->ip_set_list);
1145 memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
1146 rcu_assign_pointer(inst->ip_set_list, list);
1147 /* Make sure all current packets have passed through */
1150 index = inst->ip_set_max;
1151 inst->ip_set_max = i;
1158 /* Finally! Add our shiny new set to the list, and be done. */
1159 pr_debug("create: '%s' created with index %u!\n", set->name, index);
1160 ip_set(inst, index) = set;
/* Error unwind: destroy the type-private part, then drop the
 * module reference taken by find_set_type_get().
 */
1165 set->variant->destroy(set);
1167 module_put(set->type->me);
/* Attribute policy for commands that take a single set name. */
1175 static const struct nla_policy
1176 ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
1177 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1178 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1179 .len = IPSET_MAXNAMELEN - 1 },
/* Tear down one set: run the type's destroy and release the type
 * module reference.
 */
1183 ip_set_destroy_set(struct ip_set *set)
1185 pr_debug("set: %s\n", set->name);
1187 /* Must call it without holding any lock */
1188 set->variant->destroy(set);
1189 module_put(set->type->me);
/* Netlink DESTROY handler: destroy one named set, or — when no
 * SETNAME attribute is given — all sets, refusing when any set is
 * still referenced (kernel or netlink side).
 */
1193 static int ip_set_destroy(struct net *net, struct sock *ctnl,
1194 struct sk_buff *skb, const struct nlmsghdr *nlh,
1195 const struct nlattr * const attr[],
1196 struct netlink_ext_ack *extack)
1198 struct ip_set_net *inst = ip_set_pernet(net);
1203 if (unlikely(protocol_min_failed(attr)))
1204 return -IPSET_ERR_PROTOCOL;
1206 /* Must wait for flush to be really finished in list:set */
1209 /* Commands are serialized and references are
1210 * protected by the ip_set_ref_lock.
1211 * External systems (i.e. xt_set) must call
1212 * ip_set_put|get_nfnl_* functions, that way we
1213 * can safely check references here.
1215 * list:set timer can only decrement the reference
1216 * counter, so if it's already zero, we can proceed
1217 * without holding the lock.
1219 read_lock_bh(&ip_set_ref_lock);
1220 if (!attr[IPSET_ATTR_SETNAME]) {
/* Destroy-all: first verify nothing is referenced... */
1221 for (i = 0; i < inst->ip_set_max; i++) {
1222 s = ip_set(inst, i);
1223 if (s && (s->ref || s->ref_netlink)) {
1224 ret = -IPSET_ERR_BUSY;
/* ...then mark the instance as being destroyed and free each slot. */
1228 inst->is_destroyed = true;
1229 read_unlock_bh(&ip_set_ref_lock);
1230 for (i = 0; i < inst->ip_set_max; i++) {
1231 s = ip_set(inst, i);
1233 ip_set(inst, i) = NULL;
1234 ip_set_destroy_set(s);
1237 /* Modified by ip_set_destroy() only, which is serialized */
1238 inst->is_destroyed = false;
/* Single-set path: look it up, refuse while referenced, then free. */
1240 s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
1245 } else if (s->ref || s->ref_netlink) {
1246 ret = -IPSET_ERR_BUSY;
1249 ip_set(inst, i) = NULL;
1250 read_unlock_bh(&ip_set_ref_lock);
1252 ip_set_destroy_set(s);
/* Error exit: release the reference lock taken above. */
1256 read_unlock_bh(&ip_set_ref_lock);
/* Empty one set's contents via its type handler, under the set lock. */
1263 ip_set_flush_set(struct ip_set *set)
1265 pr_debug("set: %s\n", set->name);
1267 spin_lock_bh(&set->lock);
1268 set->variant->flush(set);
1269 spin_unlock_bh(&set->lock);
/* Netlink FLUSH handler: flush one named set, or every set when no
 * SETNAME attribute is supplied.
 */
1272 static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1273 const struct nlmsghdr *nlh,
1274 const struct nlattr * const attr[],
1275 struct netlink_ext_ack *extack)
1277 struct ip_set_net *inst = ip_set_pernet(net);
1281 if (unlikely(protocol_min_failed(attr)))
1282 return -IPSET_ERR_PROTOCOL;
1284 if (!attr[IPSET_ATTR_SETNAME]) {
1285 for (i = 0; i < inst->ip_set_max; i++) {
1286 s = ip_set(inst, i);
1288 ip_set_flush_set(s);
1291 s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1295 ip_set_flush_set(s);
/* Attribute policy for two-name commands (rename, swap). */
1303 static const struct nla_policy
1304 ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
1305 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1306 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1307 .len = IPSET_MAXNAMELEN - 1 },
1308 [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
1309 .len = IPSET_MAXNAMELEN - 1 },
/* Netlink RENAME handler: rename SETNAME to SETNAME2.  Refused while
 * the set is referenced or when the new name is already taken; the
 * name write happens under ip_set_ref_lock (paired with the reader in
 * ip_set_name_byindex()).
 */
1312 static int ip_set_rename(struct net *net, struct sock *ctnl,
1313 struct sk_buff *skb, const struct nlmsghdr *nlh,
1314 const struct nlattr * const attr[],
1315 struct netlink_ext_ack *extack)
1317 struct ip_set_net *inst = ip_set_pernet(net);
1318 struct ip_set *set, *s;
1323 if (unlikely(protocol_min_failed(attr) ||
1324 !attr[IPSET_ATTR_SETNAME] ||
1325 !attr[IPSET_ATTR_SETNAME2]))
1326 return -IPSET_ERR_PROTOCOL;
1328 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1332 write_lock_bh(&ip_set_ref_lock);
1333 if (set->ref != 0 || set->ref_netlink != 0) {
1334 ret = -IPSET_ERR_REFERENCED;
/* The target name must not collide with any existing set. */
1338 name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
1339 for (i = 0; i < inst->ip_set_max; i++) {
1340 s = ip_set(inst, i);
1341 if (s && STRNCMP(s->name, name2)) {
1342 ret = -IPSET_ERR_EXIST_SETNAME2;
1346 strncpy(set->name, name2, IPSET_MAXNAMELEN);
1349 write_unlock_bh(&ip_set_ref_lock);
1353 /* Swap two sets so that name/index points to the other.
1354 * References and set names are also swapped.
1356 * The commands are serialized by the nfnl mutex and references are
1357 * protected by the ip_set_ref_lock. The kernel interfaces
1358 * do not hold the mutex but the pointer settings are atomic
1359 * so the ip_set_list always contains valid pointers to the sets.
 */
1362 static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1363 const struct nlmsghdr *nlh,
1364 const struct nlattr * const attr[],
1365 struct netlink_ext_ack *extack)
1367 struct ip_set_net *inst = ip_set_pernet(net);
1368 struct ip_set *from, *to;
1369 ip_set_id_t from_id, to_id;
1370 char from_name[IPSET_MAXNAMELEN];
1372 if (unlikely(protocol_min_failed(attr) ||
1373 !attr[IPSET_ATTR_SETNAME] ||
1374 !attr[IPSET_ATTR_SETNAME2]))
1375 return -IPSET_ERR_PROTOCOL;
/* Resolve both sets and remember their slot indices. */
1377 from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
1382 to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
1385 return -IPSET_ERR_EXIST_SETNAME2;
1387 /* Features must not change.
1388 * Not an artifical restriction anymore, as we must prevent
1389 * possible loops created by swapping in setlist type of sets.
 */
1391 if (!(from->type->features == to->type->features &&
1392 from->family == to->family))
1393 return -IPSET_ERR_TYPE_MISMATCH;
1395 write_lock_bh(&ip_set_ref_lock);
/* An ongoing netlink dump pins a set by index; swapping under it
 * would make the dump walk the wrong set.
 */
1397 if (from->ref_netlink || to->ref_netlink) {
1398 write_unlock_bh(&ip_set_ref_lock);
/* Exchange names, userspace reference counts and the index slots,
 * all under the write lock so readers see a consistent pair.
 */
1402 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
1403 strncpy(from->name, to->name, IPSET_MAXNAMELEN);
1404 strncpy(to->name, from_name, IPSET_MAXNAMELEN);
1406 swap(from->ref, to->ref);
1407 ip_set(inst, from_id) = to;
1408 ip_set(inst, to_id) = from;
1409 write_unlock_bh(&ip_set_ref_lock);
1414 /* List/save set data */
/* cb->args[IPSET_CB_DUMP] packs the dump type in the low 16 bits and
 * the dump flags in the high 16 bits; these macros unpack it.
 */
1421 #define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
1422 #define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
/* Emit the set's timeout attribute (if any) and the CADT extension
 * flags (counters, comment, skbinfo, forceadd) into a netlink message.
 * Returns nonzero on nla_put failure (message buffer full).
 */
1425 ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
1429 if (SET_WITH_TIMEOUT(set))
1430 if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
1431 htonl(set->timeout))))
1433 if (SET_WITH_COUNTER(set))
1434 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
1435 if (SET_WITH_COMMENT(set))
1436 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
1437 if (SET_WITH_SKBINFO(set))
1438 cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
1439 if (SET_WITH_FORCEADD(set))
1440 cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
1444 return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
1446 EXPORT_SYMBOL_GPL(ip_set_put_flags);
/* Netlink dump .done callback: drop the netlink reference taken while
 * dumping a set and let the type release any per-dump state (uref).
 * cb->args[IPSET_CB_ARG0] != 0 means a set was still being listed.
 */
1449 ip_set_dump_done(struct netlink_callback *cb)
1451 if (cb->args[IPSET_CB_ARG0]) {
1452 struct ip_set_net *inst =
1453 (struct ip_set_net *)cb->args[IPSET_CB_NET];
1454 ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
/* ref_netlink > 0 guarantees the set cannot be destroyed here,
 * so the raw (unlocked) dereference is safe.
 */
1455 struct ip_set *set = ip_set_ref_netlink(inst, index);
1457 if (set->variant->uref)
1458 set->variant->uref(set, cb, false);
1459 pr_debug("release set %s\n", set->name);
1460 __ip_set_put_netlink(set);
/* Debug helper: print type and length of every attribute in a netlink
 * message.  Output only appears when pr_debug is enabled.
 */
1466 dump_attrs(struct nlmsghdr *nlh)
1468 const struct nlattr *attr;
1471 pr_debug("dump nlmsg\n");
1472 nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
1473 pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
/* Attribute policy for LIST/SAVE dump requests: optional set name plus
 * optional dump flags.
 */
1477 static const struct nla_policy
1478 ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
1479 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1480 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1481 .len = IPSET_MAXNAMELEN - 1 },
1482 [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
/* Netlink dump .start callback: parse the original request once and
 * stash the dump parameters (protocol, dump type DUMP_ONE/DUMP_ALL,
 * flags, start index, netns instance) into cb->args for ip_set_dump_do.
 */
1486 ip_set_dump_start(struct netlink_callback *cb)
1488 struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
1489 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1490 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
/* The attributes start right after the nfgenmsg header. */
1491 struct nlattr *attr = (void *)nlh + min_len;
1492 struct sk_buff *skb = cb->skb;
1493 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1497 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
1498 nlh->nlmsg_len - min_len,
1499 ip_set_dump_policy, NULL);
1503 cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
1504 if (cda[IPSET_ATTR_SETNAME]) {
/* Named dump: resolve the set and start at its index. */
1508 set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
1514 dump_type = DUMP_ONE;
1515 cb->args[IPSET_CB_INDEX] = index;
1517 dump_type = DUMP_ALL;
1520 if (cda[IPSET_ATTR_FLAGS]) {
1521 u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
/* Flags share the dump word: type low 16 bits, flags high 16. */
1523 dump_type |= (f << 16);
1525 cb->args[IPSET_CB_NET] = (unsigned long)inst;
1526 cb->args[IPSET_CB_DUMP] = dump_type;
1531 /* We have to create and send the error message manually :-( */
1532 if (nlh->nlmsg_flags & NLM_F_ACK) {
1533 netlink_ack(cb->skb, nlh, ret, NULL);
/* Netlink dump .dump callback: emit one or more sets into skb.  May be
 * called repeatedly; all progress state lives in cb->args.  While a set
 * is being listed it is pinned via a netlink reference so it cannot be
 * destroyed between invocations.
 */
1539 ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
1541 ip_set_id_t index = IPSET_INVALID_ID, max;
1542 struct ip_set *set = NULL;
1543 struct nlmsghdr *nlh = NULL;
1544 unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
1545 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1546 u32 dump_type, dump_flags;
1550 if (!cb->args[IPSET_CB_DUMP])
1553 if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
1556 dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
1557 dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
/* DUMP_ONE visits exactly one slot; otherwise walk to the end. */
1558 max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
1561 pr_debug("dump type, flag: %u %u index: %ld\n",
1562 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
1563 for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
1564 index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
/* The write lock keeps the slot and is_destroyed stable while we
 * decide whether to reference this set.
 */
1565 write_lock_bh(&ip_set_ref_lock);
1566 set = ip_set(inst, index);
1567 is_destroyed = inst->is_destroyed;
1568 if (!set || is_destroyed) {
1569 write_unlock_bh(&ip_set_ref_lock);
1570 if (dump_type == DUMP_ONE) {
1575 /* All sets are just being destroyed */
1581 /* When dumping all sets, we must dump "sorted"
1582 * so that lists (unions of sets) are dumped last.
 */
1584 if (dump_type != DUMP_ONE &&
1585 ((dump_type == DUMP_ALL) ==
1586 !!(set->type->features & IPSET_DUMP_LAST))) {
1587 write_unlock_bh(&ip_set_ref_lock);
1590 pr_debug("List set: %s\n", set->name);
1591 if (!cb->args[IPSET_CB_ARG0]) {
1592 /* Start listing: make sure set won't be destroyed */
1593 pr_debug("reference set\n");
1596 write_unlock_bh(&ip_set_ref_lock);
1597 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
1598 cb->nlh->nlmsg_seq, flags,
1602 goto release_refcount;
1604 if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
1605 cb->args[IPSET_CB_PROTO]) ||
1606 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
1607 goto nla_put_failure;
1608 if (dump_flags & IPSET_FLAG_LIST_SETNAME)
/* ARG0 tracks per-set progress: header first, then members. */
1610 switch (cb->args[IPSET_CB_ARG0]) {
1612 /* Core header data */
1613 if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
1615 nla_put_u8(skb, IPSET_ATTR_FAMILY,
1617 nla_put_u8(skb, IPSET_ATTR_REVISION,
1619 goto nla_put_failure;
1620 if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
1621 nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
1622 goto nla_put_failure;
1623 ret = set->variant->head(set, skb);
1625 goto release_refcount;
1626 if (dump_flags & IPSET_FLAG_LIST_HEADER)
1628 if (set->variant->uref)
1629 set->variant->uref(set, cb, true);
/* List the set members; the type callback resumes from cb state. */
1632 ret = set->variant->list(set, skb, cb);
1633 if (!cb->args[IPSET_CB_ARG0])
1634 /* Set is done, proceed with next one */
1636 goto release_refcount;
1639 /* If we dump all sets, continue with dumping last ones */
1640 if (dump_type == DUMP_ALL) {
1641 dump_type = DUMP_LAST;
1642 cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
1643 cb->args[IPSET_CB_INDEX] = 0;
1644 if (set && set->variant->uref)
1645 set->variant->uref(set, cb, false);
1653 if (dump_type == DUMP_ONE)
1654 cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
1656 cb->args[IPSET_CB_INDEX]++;
1658 /* If there was an error or set is done, release set */
1659 if (ret || !cb->args[IPSET_CB_ARG0]) {
1660 set = ip_set_ref_netlink(inst, index);
1661 if (set->variant->uref)
1662 set->variant->uref(set, cb, false);
1663 pr_debug("release set %s\n", set->name);
1664 __ip_set_put_netlink(set);
1665 cb->args[IPSET_CB_ARG0] = 0;
1669 nlmsg_end(skb, nlh);
1670 pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
/* Returning skb->len > 0 tells netlink to call us again. */
1674 return ret < 0 ? ret : skb->len;
/* LIST/SAVE command handler: hand the request over to the netlink dump
 * machinery with our start/dump/done callbacks.
 */
1677 static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1678 const struct nlmsghdr *nlh,
1679 const struct nlattr * const attr[],
1680 struct netlink_ext_ack *extack)
1682 if (unlikely(protocol_min_failed(attr)))
1683 return -IPSET_ERR_PROTOCOL;
1686 struct netlink_dump_control c = {
1687 .start = ip_set_dump_start,
1688 .dump = ip_set_dump_do,
1689 .done = ip_set_dump_done,
1691 return netlink_dump_start(ctnl, skb, nlh, &c);
1695 /* Add, del and test */
/* Attribute policy for ADD/DEL/TEST: either a single DATA nest or an
 * ADT nest of multiple DATA nests (batch/restore mode with LINENO).
 */
1697 static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
1698 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1699 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1700 .len = IPSET_MAXNAMELEN - 1 },
1701 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
1702 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
1703 [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
/* Perform one add/del operation on a set under the set spinlock.
 * On -EAGAIN from the type's uadt the set is resized (if the type
 * supports it) and the operation retried.  In batch/restore mode an
 * error is reported back to userspace manually, with the failing line
 * number attached, instead of the normal netlink ACK path.
 */
1707 call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1708 struct nlattr *tb[], enum ipset_adt adt,
1709 u32 flags, bool use_lineno)
1713 bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
1716 spin_lock_bh(&set->lock);
1717 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
1718 spin_unlock_bh(&set->lock);
/* Resize (outside the spinlock) and retry while the set is full. */
1720 } while (ret == -EAGAIN &&
1721 set->variant->resize &&
1722 (ret = set->variant->resize(set, retried)) == 0);
/* "Already exists" is not an error when IPSET_FLAG_EXIST is set. */
1724 if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
1726 if (lineno && use_lineno) {
1727 /* Error in restore/batch mode: send back lineno */
1728 struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
1729 struct sk_buff *skb2;
1730 struct nlmsgerr *errmsg;
/* Clamp the echoed payload size against overflow. */
1731 size_t payload = min(SIZE_MAX,
1732 sizeof(*errmsg) + nlmsg_len(nlh));
1733 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1734 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
1735 struct nlattr *cmdattr;
1738 skb2 = nlmsg_new(payload, GFP_KERNEL);
/* Build an NLMSG_ERROR reply echoing the original request... */
1741 rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
1742 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
1743 errmsg = nlmsg_data(rep);
1744 errmsg->error = ret;
1745 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
1746 cmdattr = (void *)&errmsg->msg + min_len;
/* ...then patch the echoed LINENO attribute with the failing line. */
1748 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
1749 nlh->nlmsg_len - min_len, ip_set_adt_policy,
1756 errline = nla_data(cda[IPSET_ATTR_LINENO]);
1760 netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
1762 /* Signal netlink not to send its ACK/errmsg. */
/* Common ADD/DEL front end: validate the request, look up the set and
 * dispatch either a single DATA nest or each DATA element of an ADT
 * nest (batch mode) to call_ad().
 */
1769 static int ip_set_ad(struct net *net, struct sock *ctnl,
1770 struct sk_buff *skb,
1772 const struct nlmsghdr *nlh,
1773 const struct nlattr * const attr[],
1774 struct netlink_ext_ack *extack)
1776 struct ip_set_net *inst = ip_set_pernet(net);
1778 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1779 const struct nlattr *nla;
1780 u32 flags = flag_exist(nlh);
/* Exactly one of DATA or ADT must be present (XOR); ADT additionally
 * requires LINENO so batch errors can be reported by line.
 */
1784 if (unlikely(protocol_min_failed(attr) ||
1785 !attr[IPSET_ATTR_SETNAME] ||
1786 !((attr[IPSET_ATTR_DATA] != NULL) ^
1787 (attr[IPSET_ATTR_ADT] != NULL)) ||
1788 (attr[IPSET_ATTR_DATA] &&
1789 !flag_nested(attr[IPSET_ATTR_DATA])) ||
1790 (attr[IPSET_ATTR_ADT] &&
1791 (!flag_nested(attr[IPSET_ATTR_ADT]) ||
1792 !attr[IPSET_ATTR_LINENO]))))
1793 return -IPSET_ERR_PROTOCOL;
1795 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1799 use_lineno = !!attr[IPSET_ATTR_LINENO];
1800 if (attr[IPSET_ATTR_DATA]) {
/* Single-element form: parse against the type's own policy. */
1801 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
1802 attr[IPSET_ATTR_DATA],
1803 set->type->adt_policy, NULL))
1804 return -IPSET_ERR_PROTOCOL;
1805 ret = call_ad(ctnl, skb, set, tb, adt, flags,
/* Batch form: every child must itself be a nested DATA attribute. */
1810 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
1811 if (nla_type(nla) != IPSET_ATTR_DATA ||
1812 !flag_nested(nla) ||
1813 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
1814 set->type->adt_policy, NULL))
1815 return -IPSET_ERR_PROTOCOL;
1816 ret = call_ad(ctnl, skb, set, tb, adt,
/* ADD command handler: thin wrapper delegating to ip_set_ad(). */
1825 static int ip_set_uadd(struct net *net, struct sock *ctnl,
1826 struct sk_buff *skb, const struct nlmsghdr *nlh,
1827 const struct nlattr * const attr[],
1828 struct netlink_ext_ack *extack)
1830 return ip_set_ad(net, ctnl, skb,
1831 IPSET_ADD, nlh, attr, extack);
/* DEL command handler: thin wrapper delegating to ip_set_ad(). */
1834 static int ip_set_udel(struct net *net, struct sock *ctnl,
1835 struct sk_buff *skb, const struct nlmsghdr *nlh,
1836 const struct nlattr * const attr[],
1837 struct netlink_ext_ack *extack)
1839 return ip_set_ad(net, ctnl, skb,
1840 IPSET_DEL, nlh, attr, extack);
/* TEST command handler: check whether a single element is in a set.
 * Runs the type's uadt in IPSET_TEST mode under rcu_read_lock_bh.
 */
1843 static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1844 const struct nlmsghdr *nlh,
1845 const struct nlattr * const attr[],
1846 struct netlink_ext_ack *extack)
1848 struct ip_set_net *inst = ip_set_pernet(net);
1850 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1854 if (unlikely(protocol_min_failed(attr) ||
1855 !attr[IPSET_ATTR_SETNAME] ||
1856 !attr[IPSET_ATTR_DATA] ||
1857 !flag_nested(attr[IPSET_ATTR_DATA])))
1858 return -IPSET_ERR_PROTOCOL;
1860 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1864 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
1865 set->type->adt_policy, NULL))
1866 return -IPSET_ERR_PROTOCOL;
1869 ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
1870 rcu_read_unlock_bh();
1871 /* Userspace can't trigger element to be re-added */
/* Positive result means "found"; map everything else to not-exist. */
1875 return ret > 0 ? 0 : -IPSET_ERR_EXIST;
1878 /* Get headed data of a set */
/* HEADER command handler: reply with the name, type name, family and
 * revision of one named set via netlink unicast.
 */
1880 static int ip_set_header(struct net *net, struct sock *ctnl,
1881 struct sk_buff *skb, const struct nlmsghdr *nlh,
1882 const struct nlattr * const attr[],
1883 struct netlink_ext_ack *extack)
1885 struct ip_set_net *inst = ip_set_pernet(net);
1886 const struct ip_set *set;
1887 struct sk_buff *skb2;
1888 struct nlmsghdr *nlh2;
1891 if (unlikely(protocol_min_failed(attr) ||
1892 !attr[IPSET_ATTR_SETNAME]))
1893 return -IPSET_ERR_PROTOCOL;
1895 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1899 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1903 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1907 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1908 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
1909 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
1910 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
1911 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
1912 goto nla_put_failure;
1913 nlmsg_end(skb2, nlh2);
1915 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
/* Attribute didn't fit: undo the partial message before freeing. */
1922 nlmsg_cancel(skb2, nlh2);
/* Attribute policy for the TYPE command: type name plus family. */
1930 static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
1931 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1932 [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
1933 .len = IPSET_MAXNAMELEN - 1 },
1934 [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
/* TYPE command handler: report the supported minimum and maximum
 * revision of a set type for the given address family.
 */
1937 static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1938 const struct nlmsghdr *nlh,
1939 const struct nlattr * const attr[],
1940 struct netlink_ext_ack *extack)
1942 struct sk_buff *skb2;
1943 struct nlmsghdr *nlh2;
1944 u8 family, min, max;
1945 const char *typename;
1948 if (unlikely(protocol_min_failed(attr) ||
1949 !attr[IPSET_ATTR_TYPENAME] ||
1950 !attr[IPSET_ATTR_FAMILY]))
1951 return -IPSET_ERR_PROTOCOL;
1953 family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
1954 typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
/* May trigger a module autoload for the requested type. */
1955 ret = find_set_type_minmax(typename, family, &min, &max);
1959 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1963 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1967 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1968 nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
1969 nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
1970 nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
1971 nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
1972 goto nla_put_failure;
1973 nlmsg_end(skb2, nlh2);
1975 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
1976 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1983 nlmsg_cancel(skb2, nlh2);
1989 /* Get protocol version */
/* Attribute policy for the PROTOCOL command: protocol number only. */
1991 static const struct nla_policy
1992 ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
1993 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
/* PROTOCOL command handler: report the supported protocol version
 * range (IPSET_PROTOCOL_MIN .. IPSET_PROTOCOL) to userspace.
 */
1996 static int ip_set_protocol(struct net *net, struct sock *ctnl,
1997 struct sk_buff *skb, const struct nlmsghdr *nlh,
1998 const struct nlattr * const attr[],
1999 struct netlink_ext_ack *extack)
2001 struct sk_buff *skb2;
2002 struct nlmsghdr *nlh2;
2005 if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
2006 return -IPSET_ERR_PROTOCOL;
2008 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2012 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2013 IPSET_CMD_PROTOCOL);
2016 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
2017 goto nla_put_failure;
2018 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN))
2019 goto nla_put_failure;
2020 nlmsg_end(skb2, nlh2);
2022 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2029 nlmsg_cancel(skb2, nlh2);
2035 /* Get set by name or index, from userspace */
/* GET_BYNAME command handler: resolve a set name to its index (and
 * family) and return them via netlink unicast.
 */
2037 static int ip_set_byname(struct net *net, struct sock *ctnl,
2038 struct sk_buff *skb, const struct nlmsghdr *nlh,
2039 const struct nlattr * const attr[],
2040 struct netlink_ext_ack *extack)
2042 struct ip_set_net *inst = ip_set_pernet(net);
2043 struct sk_buff *skb2;
2044 struct nlmsghdr *nlh2;
2045 ip_set_id_t id = IPSET_INVALID_ID;
2046 const struct ip_set *set;
/* Note: full (not minimum) protocol match is required here. */
2049 if (unlikely(protocol_failed(attr) ||
2050 !attr[IPSET_ATTR_SETNAME]))
2051 return -IPSET_ERR_PROTOCOL;
2053 set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id);
2054 if (id == IPSET_INVALID_ID)
2057 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2061 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2062 IPSET_CMD_GET_BYNAME);
2065 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2066 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
2067 nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id)))
2068 goto nla_put_failure;
2069 nlmsg_end(skb2, nlh2);
2071 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2078 nlmsg_cancel(skb2, nlh2);
/* Attribute policy for GET_BYINDEX: a 16-bit set index. */
2084 static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = {
2085 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
2086 [IPSET_ATTR_INDEX] = { .type = NLA_U16 },
/* GET_BYINDEX command handler: resolve a set index to its name and
 * return the name via netlink unicast.
 */
2089 static int ip_set_byindex(struct net *net, struct sock *ctnl,
2090 struct sk_buff *skb, const struct nlmsghdr *nlh,
2091 const struct nlattr * const attr[],
2092 struct netlink_ext_ack *extack)
2094 struct ip_set_net *inst = ip_set_pernet(net);
2095 struct sk_buff *skb2;
2096 struct nlmsghdr *nlh2;
2097 ip_set_id_t id = IPSET_INVALID_ID;
2098 const struct ip_set *set;
2101 if (unlikely(protocol_failed(attr) ||
2102 !attr[IPSET_ATTR_INDEX]))
2103 return -IPSET_ERR_PROTOCOL;
2105 id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]);
/* Bounds-check the userspace-provided index before dereferencing. */
2106 if (id >= inst->ip_set_max)
2108 set = ip_set(inst, id);
2112 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2116 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2117 IPSET_CMD_GET_BYINDEX);
2120 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2121 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name))
2122 goto nla_put_failure;
2123 nlmsg_end(skb2, nlh2);
2125 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2132 nlmsg_cancel(skb2, nlh2);
/* Dispatch table mapping every IPSET_CMD_* nfnetlink message to its
 * handler and attribute policy.  Indexed by command number.
 */
2138 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
2139 [IPSET_CMD_NONE] = {
2140 .call = ip_set_none,
2141 .attr_count = IPSET_ATTR_CMD_MAX,
2143 [IPSET_CMD_CREATE] = {
2144 .call = ip_set_create,
2145 .attr_count = IPSET_ATTR_CMD_MAX,
2146 .policy = ip_set_create_policy,
2148 [IPSET_CMD_DESTROY] = {
2149 .call = ip_set_destroy,
2150 .attr_count = IPSET_ATTR_CMD_MAX,
2151 .policy = ip_set_setname_policy,
2153 [IPSET_CMD_FLUSH] = {
2154 .call = ip_set_flush,
2155 .attr_count = IPSET_ATTR_CMD_MAX,
2156 .policy = ip_set_setname_policy,
2158 [IPSET_CMD_RENAME] = {
2159 .call = ip_set_rename,
2160 .attr_count = IPSET_ATTR_CMD_MAX,
2161 .policy = ip_set_setname2_policy,
2163 [IPSET_CMD_SWAP] = {
2164 .call = ip_set_swap,
2165 .attr_count = IPSET_ATTR_CMD_MAX,
2166 .policy = ip_set_setname2_policy,
2168 [IPSET_CMD_LIST] = {
2169 .call = ip_set_dump,
2170 .attr_count = IPSET_ATTR_CMD_MAX,
2171 .policy = ip_set_dump_policy,
/* SAVE shares the dump handler but uses the plain setname policy. */
2173 [IPSET_CMD_SAVE] = {
2174 .call = ip_set_dump,
2175 .attr_count = IPSET_ATTR_CMD_MAX,
2176 .policy = ip_set_setname_policy,
2179 .call = ip_set_uadd,
2180 .attr_count = IPSET_ATTR_CMD_MAX,
2181 .policy = ip_set_adt_policy,
2184 .call = ip_set_udel,
2185 .attr_count = IPSET_ATTR_CMD_MAX,
2186 .policy = ip_set_adt_policy,
2188 [IPSET_CMD_TEST] = {
2189 .call = ip_set_utest,
2190 .attr_count = IPSET_ATTR_CMD_MAX,
2191 .policy = ip_set_adt_policy,
2193 [IPSET_CMD_HEADER] = {
2194 .call = ip_set_header,
2195 .attr_count = IPSET_ATTR_CMD_MAX,
2196 .policy = ip_set_setname_policy,
2198 [IPSET_CMD_TYPE] = {
2199 .call = ip_set_type,
2200 .attr_count = IPSET_ATTR_CMD_MAX,
2201 .policy = ip_set_type_policy,
2203 [IPSET_CMD_PROTOCOL] = {
2204 .call = ip_set_protocol,
2205 .attr_count = IPSET_ATTR_CMD_MAX,
2206 .policy = ip_set_protocol_policy,
2208 [IPSET_CMD_GET_BYNAME] = {
2209 .call = ip_set_byname,
2210 .attr_count = IPSET_ATTR_CMD_MAX,
2211 .policy = ip_set_setname_policy,
2213 [IPSET_CMD_GET_BYINDEX] = {
2214 .call = ip_set_byindex,
2215 .attr_count = IPSET_ATTR_CMD_MAX,
2216 .policy = ip_set_index_policy,
/* nfnetlink subsystem descriptor registered in ip_set_init(). */
2220 static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
2222 .subsys_id = NFNL_SUBSYS_IPSET,
2223 .cb_count = IPSET_MSG_MAX,
2224 .cb = ip_set_netlink_subsys_cb,
2227 /* Interface to iptables/ip6tables */
/* Legacy getsockopt(SO_IP_SET) interface used by the xtables "set"
 * match/target: version query plus name<->index lookups.  Requires
 * CAP_NET_ADMIN in the socket's user namespace.  The user buffer is
 * copied in, dispatched on the leading op code, then copied back.
 */
2230 ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2234 int copylen = *len, ret = 0;
2235 struct net *net = sock_net(sk);
2236 struct ip_set_net *inst = ip_set_pernet(net);
2238 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2240 if (optval != SO_IP_SET)
2242 if (*len < sizeof(unsigned int))
2245 data = vmalloc(*len);
2248 if (copy_from_user(data, user, *len) != 0) {
2254 if (*op < IP_SET_OP_VERSION) {
2255 /* Check the version at the beginning of operations */
2256 struct ip_set_req_version *req_version = data;
2258 if (*len < sizeof(struct ip_set_req_version)) {
2263 if (req_version->version < IPSET_PROTOCOL_MIN) {
2270 case IP_SET_OP_VERSION: {
2271 struct ip_set_req_version *req_version = data;
2273 if (*len != sizeof(struct ip_set_req_version)) {
2278 req_version->version = IPSET_PROTOCOL;
2279 if (copy_to_user(user, req_version,
2280 sizeof(struct ip_set_req_version)))
2284 case IP_SET_OP_GET_BYNAME: {
2285 struct ip_set_req_get_set *req_get = data;
2288 if (*len != sizeof(struct ip_set_req_get_set)) {
/* Force NUL termination of the userspace-supplied name. */
2292 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2293 nfnl_lock(NFNL_SUBSYS_IPSET);
2294 find_set_and_id(inst, req_get->set.name, &id);
2295 req_get->set.index = id;
2296 nfnl_unlock(NFNL_SUBSYS_IPSET);
2299 case IP_SET_OP_GET_FNAME: {
2300 struct ip_set_req_get_set_family *req_get = data;
2303 if (*len != sizeof(struct ip_set_req_get_set_family)) {
2307 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2308 nfnl_lock(NFNL_SUBSYS_IPSET);
2309 find_set_and_id(inst, req_get->set.name, &id);
2310 req_get->set.index = id;
2311 if (id != IPSET_INVALID_ID)
2312 req_get->family = ip_set(inst, id)->family;
2313 nfnl_unlock(NFNL_SUBSYS_IPSET);
2316 case IP_SET_OP_GET_BYINDEX: {
2317 struct ip_set_req_get_set *req_get = data;
/* Reject out-of-range indices before the array access. */
2320 if (*len != sizeof(struct ip_set_req_get_set) ||
2321 req_get->set.index >= inst->ip_set_max) {
2325 nfnl_lock(NFNL_SUBSYS_IPSET);
2326 set = ip_set(inst, req_get->set.index);
2327 ret = strscpy(req_get->set.name, set ? set->name : "",
2329 nfnl_unlock(NFNL_SUBSYS_IPSET);
2337 } /* end of switch(op) */
2340 if (copy_to_user(user, data, copylen))
/* nf_sockopt registration exposing SO_IP_SET via getsockopt. */
2350 static struct nf_sockopt_ops so_set __read_mostly = {
2352 .get_optmin = SO_IP_SET,
2353 .get_optmax = SO_IP_SET + 1,
2354 .get = ip_set_sockfn_get,
2355 .owner = THIS_MODULE,
/* Per-netns init: size and allocate the ip_set_list pointer array.
 * max_sets (module parameter) overrides CONFIG_IP_SET_MAX; the count
 * is clamped below IPSET_INVALID_ID which is a reserved sentinel.
 */
2358 static int __net_init
2359 ip_set_net_init(struct net *net)
2361 struct ip_set_net *inst = ip_set_pernet(net);
2362 struct ip_set **list;
2364 inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
2365 if (inst->ip_set_max >= IPSET_INVALID_ID)
2366 inst->ip_set_max = IPSET_INVALID_ID - 1;
/* kvcalloc zero-fills, so every slot starts as a NULL set pointer. */
2368 list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
2371 inst->is_deleted = false;
2372 inst->is_destroyed = false;
2373 rcu_assign_pointer(inst->ip_set_list, list);
/* Per-netns teardown: destroy every remaining set and free the list.
 * The nfnl mutex serializes against concurrent netlink commands.
 */
2377 static void __net_exit
2378 ip_set_net_exit(struct net *net)
2380 struct ip_set_net *inst = ip_set_pernet(net);
2382 struct ip_set *set = NULL;
2385 inst->is_deleted = true; /* flag for ip_set_nfnl_put */
2387 nfnl_lock(NFNL_SUBSYS_IPSET);
2388 for (i = 0; i < inst->ip_set_max; i++) {
2389 set = ip_set(inst, i);
2391 ip_set(inst, i) = NULL;
2392 ip_set_destroy_set(set);
2395 nfnl_unlock(NFNL_SUBSYS_IPSET);
/* No concurrent users remain, so a plain protected deref suffices. */
2396 kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
/* Pernet registration: allocates sizeof(struct ip_set_net) per netns,
 * accessible via net_generic()/ip_set_pernet().
 */
2399 static struct pernet_operations ip_set_net_ops = {
2400 .init = ip_set_net_init,
2401 .exit = ip_set_net_exit,
2402 .id = &ip_set_net_id,
2403 .size = sizeof(struct ip_set_net),
/* Module init: register pernet subsys, nfnetlink subsystem and the
 * SO_IP_SET sockopt handler; unwind in reverse order on any failure.
 */
2409 int ret = register_pernet_subsys(&ip_set_net_ops);
2412 pr_err("ip_set: cannot register pernet_subsys.\n");
2416 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
2418 pr_err("ip_set: cannot register with nfnetlink.\n");
2419 unregister_pernet_subsys(&ip_set_net_ops);
2423 ret = nf_register_sockopt(&so_set);
2425 pr_err("SO_SET registry failed: %d\n", ret);
2426 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2427 unregister_pernet_subsys(&ip_set_net_ops);
/* Module exit: unregister everything in reverse order of ip_set_init. */
2437 nf_unregister_sockopt(&so_set);
2438 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2440 unregister_pernet_subsys(&ip_set_net_ops);
2441 pr_debug("these are the famous last words\n");
/* Module entry/exit points and the advertised protocol version. */
2444 module_init(ip_set_init);
2445 module_exit(ip_set_fini);
2447 MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));