// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

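/*
 * Per-set backend state: writers serialize on ->lock, while lookups run
 * locklessly and use ->count (a seqcount tied to ->lock) to detect a
 * concurrent writer and retry under the read lock. ->last_gc records the
 * time of the last synchronous garbage collection run, in jiffies.
 */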
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	unsigned long		last_gc;
};

struct nft_rbtree_elem {
	struct nft_elem_priv	priv;
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
	return nft_set_elem_expired(&rbe->ext);
}

static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_rbtree_elem_expired(rbe))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_rbtree_elem_expired(interval) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

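/*
 * Lockless lookup: try once outside the lock and let the seqcount detect
 * a concurrent writer; if the walk raced with an update, repeat it under
 * the read lock so it is guaranteed to see a consistent tree.
 */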
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static struct nft_elem_priv *
nft_rbtree_get(const struct net *net, const struct nft_set *set,
	       const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return &rbe->priv;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	read_unlock_bh(&priv->lock);

	if (!ret)
		return ERR_PTR(-ENOENT);

	return &rbe->priv;
}

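/*
 * Synchronous garbage collection helpers: with the write lock held, an
 * expired element (and the end-interval element paired with it) is
 * deactivated, erased from the tree and queued on a transaction GC batch.
 */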
static void nft_rbtree_gc_elem_remove(struct net *net, struct nft_set *set,
				      struct nft_rbtree *priv,
				      struct nft_rbtree_elem *rbe)
{
	lockdep_assert_held_write(&priv->lock);
	nft_setelem_data_deactivate(net, set, &rbe->priv);
	rb_erase(&rbe->node, &priv->root);
}

static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
		   struct nft_rbtree_elem *rbe, u8 genmask)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct net *net = read_pnet(&set->net);
	struct nft_rbtree_elem *rbe_prev;
	struct nft_trans_gc *gc;

	gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
	if (!gc)
		return ERR_PTR(-ENOMEM);

	/* search for end interval coming before this element.
	 * end intervals don't carry a timeout extension, they
	 * are coupled with the interval start element.
	 */
	while (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev) &&
		    nft_set_elem_active(&rbe_prev->ext, genmask))
			break;

		prev = rb_prev(prev);
	}

	rbe_prev = NULL;
	if (prev) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		nft_rbtree_gc_elem_remove(net, set, priv, rbe_prev);

		/* There is always room in this trans gc for this element,
		 * memory allocation never actually happens, hence, the warning
		 * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
		 * this is synchronous gc which never fails.
		 */
		gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
		if (WARN_ON_ONCE(!gc))
			return ERR_PTR(-ENOMEM);

		nft_trans_gc_elem_add(gc, rbe_prev);
	}

	nft_rbtree_gc_elem_remove(net, set, priv, rbe);
	gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
	if (WARN_ON_ONCE(!gc))
		return ERR_PTR(-ENOMEM);

	nft_trans_gc_elem_add(gc, rbe);

	nft_trans_gc_queue_sync_done(gc);

	return rbe_prev;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closest to where the new element is to be inserted:
	 * update the first element for the node list path.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}

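/*
 * Insertion: descend once to find the element closest to the new key,
 * walk the ordered elements from there to detect full or partial
 * overlaps (garbage-collecting expired entries that would otherwise
 * trigger bogus overlap reports), then descend again to link the new
 * node in place.
 */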
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_elem_priv **elem_priv)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *next, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 cur_genmask = nft_genmask_cur(net);
	u8 genmask = nft_genmask_next(net);
	int d;

	/* Descend the tree to search for an existing element greater than the
	 * key value to insert that is greater than the new element. This is the
	 * first element to walk the ordered elements to find possible overlap.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlap by going through the list of valid tree nodes.
	 * Values stored in the tree are in reversed order, starting from
	 * highest to lowest value.
	 */
	for (node = first; node != NULL; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports
		 * but skip new elements in this transaction.
		 */
		if (nft_set_elem_expired(&rbe->ext) &&
		    nft_set_elem_active(&rbe->ext, cur_genmask)) {
			const struct nft_rbtree_elem *removed_end;

			removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
			if (IS_ERR(removed_end))
				return PTR_ERR(removed_end);

			if (removed_end == rbe_le || removed_end == rbe_ge)
				return -EAGAIN;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to key value. */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* this is a closer more or equal element, update it. */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to key value, make sure flags are
			 * the same, an existing more or equal start element
			 * must not be replaced by more or equal end element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			continue;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*elem_priv = &rbe_ge->priv;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*elem_priv = &rbe_le->priv;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start element since they
	 *   are constant, skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being a end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_elem_priv **elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv);
	struct nft_rbtree *priv = nft_set_priv(set);
	int err;

	do {
		if (fatal_signal_pending(current))
			return -EINTR;

		cond_resched();

		write_lock_bh(&priv->lock);
		write_seqcount_begin(&priv->count);
		err = __nft_rbtree_insert(net, set, rbe, elem_priv);
		write_seqcount_end(&priv->count);
		write_unlock_bh(&priv->lock);
	} while (err == -EAGAIN);

	return err;
}

static void nft_rbtree_erase(struct nft_rbtree *priv, struct nft_rbtree_elem *rbe)
{
	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);
	struct nft_rbtree *priv = nft_set_priv(set);

	nft_rbtree_erase(priv, rbe);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);

	nft_set_elem_change_active(net, set, &rbe->ext);
}

static void nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set,
			     struct nft_elem_priv *elem_priv)
{
	struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem_priv);

	nft_set_elem_change_active(net, set, &rbe->ext);
}

static struct nft_elem_priv *
nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
		      const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv);
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (nft_set_elem_expired(&rbe->ext)) {
				break;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, &rbe->priv);
			return &rbe->priv;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		iter->err = iter->fn(ctx, set, iter, &rbe->priv);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

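/*
 * Periodic garbage collection, driven from the commit path: walk the
 * whole tree and remove expired elements together with the end-interval
 * elements that precede them, batching the removals on a transaction GC.
 */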
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
				 struct nft_rbtree *priv,
				 struct nft_rbtree_elem *rbe)
{
	nft_setelem_data_deactivate(net, set, &rbe->priv);
	nft_rbtree_erase(priv, rbe);
}

static void nft_rbtree_gc(struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe, *rbe_end = NULL;
	struct nftables_pernet *nft_net;
	struct rb_node *node, *next;
	struct nft_trans_gc *gc;
	struct net *net;

	set = nft_set_container_of(priv);
	net = read_pnet(&set->net);
	nft_net = nft_pernet(net);

	gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
	if (!gc)
		return;

	for (node = rb_first(&priv->root); node ; node = next) {
		next = rb_next(node);

		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		/* elements are reversed in the rbtree for historical reasons,
		 * from highest to lowest value, that is why end element is
		 * always visited before the start element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;

		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			goto try_later;

		/* end element needs to be removed first, it has
		 * no timeout extension.
		 */
		if (rbe_end) {
			nft_rbtree_gc_remove(net, set, priv, rbe_end);
			nft_trans_gc_elem_add(gc, rbe_end);
			rbe_end = NULL;
		}

		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			goto try_later;

		nft_rbtree_gc_remove(net, set, priv, rbe);
		nft_trans_gc_elem_add(gc, rbe);
	}

try_later:

	if (gc) {
		gc = nft_trans_gc_catchall_sync(gc);
		nft_trans_gc_queue_sync_done(gc);
		priv->last_gc = jiffies;
	}
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	BUILD_BUG_ON(offsetof(struct nft_rbtree_elem, priv) != 0);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	return 0;
}

static void nft_rbtree_destroy(const struct nft_ctx *ctx,
			       const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nf_tables_set_elem_destroy(ctx, set, &rbe->priv);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

static void nft_rbtree_commit(struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);

	if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
		nft_rbtree_gc(set);
}

static void nft_rbtree_gc_init(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);

	priv->last_gc = jiffies;
}

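/*
 * rbtree set type: supports intervals, maps, objects and timeouts;
 * lookups are O(log n) and memory use grows linearly with the number
 * of elements (see nft_rbtree_estimate()).
 */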
const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.commit		= nft_rbtree_commit,
		.gc_init	= nft_rbtree_gc_init,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};