// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */
#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
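/* Allocate a new flow together with its default per-CPU statistics node.
 * Returns the flow on success or ERR_PTR(-ENOMEM) on allocation failure.
 */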
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}
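/* Release all memory held by a flow: its unmasked key (when the flow is
 * identified by key rather than UFID), its actions and its per-CPU stats.
 */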
static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}
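/* Free a flow either immediately or, when 'deferred' is true, after an RCU
 * grace period so that concurrent RCU readers can finish using it.
 */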
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}
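/* Allocate a hash-table instance with 'new_size' buckets, initialize every
 * bucket list and pick a random hash seed for this instance.
 */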
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per-CPU counters are not atomic we cannot go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}
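/* Allocate a mask_array for at least 'size' masks. The mask pointers and the
 * zero-baseline counters live in one allocation directly behind the struct;
 * the per-CPU usage counters are allocated separately with __alloc_percpu().
 */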
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}
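/* Replace the current mask_array with a new one of 'size' entries, copying
 * over all still-populated mask pointers and freeing the old array after an
 * RCU grace period.
 */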
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}
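/* Append 'new' to the mask array, growing the array by MASK_ARRAY_SIZE_MIN
 * entries when it is full, and reset the usage counters so the new mask gets
 * a fair chance in the next rebalance.
 */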
static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}
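/* Remove 'mask' from the mask array: the last entry is moved into the freed
 * slot, the mask itself is freed after an RCU grace period, and the array is
 * shrunk when it has become sufficiently sparse.
 */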
static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}
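/* Initialize a flow table: allocate the per-CPU mask cache, the mask array
 * and the two table instances (keyed by masked flow key and by UFID).
 */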
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
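/* Unlink a flow from both table instances, optionally update the flow
 * counters and drop the flow's reference on its mask.
 */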
static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow,
				     bool count)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	if (count)
		table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);

		if (count)
			table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

static void table_instance_destroy(struct flow_table *table,
				   struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow, false);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking; this function is called from the RCU callback or
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	free_percpu(table->mask_cache);
	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
	table_instance_destroy(table, ti, ufid_ti, false);
}
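/* Iterate over the flows in a table instance for dumping: '*bucket' and
 * '*last' track the current bucket and the offset within it across calls.
 */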
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
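/* Copy every flow from the old table instance into the new one, flipping the
 * node version so both instances can link the same flows, and mark the old
 * instance so its flows are kept when it is destroyed.
 */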
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}
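/* Replace both table instances with fresh, empty ones and free the old
 * instances (and all flows they contain) after an RCU grace period.
 */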
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}
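/* Hash the bytes of the flow key that fall within 'range'; the range is
 * expected to start and end on u32 boundaries.
 */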
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
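/* Look up the flow that matches 'unmasked' under a single 'mask': apply the
 * mask, hash the masked key and walk the corresponding hash bucket.
 */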
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the
 * mask at the index passed in via '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}
/* mask_cache maps a flow to a probable mask. This cache is not tightly
 * coupled to the mask list, so updates to the mask list can result in
 * inconsistent cache entries in the mask cache.
 * This is a per-CPU cache and is divided in MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed in the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 mask_index = 0;
		u32 cache = 0;

		n_mask_hit = &cache;
		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(tbl->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (MC_HASH_ENTRIES - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;	/* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}
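/* Find a flow whose unmasked key matches 'match' exactly, trying each mask
 * in turn. Called with the ovs mutex held.
 */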
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}
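/* Look up a flow by its unique flow identifier (UFID) in the UFID table
 * instance.
 */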
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow, true);
}
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}
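/* Sort helper for ovs_flow_masks_rebalance(): order mask entries by
 * descending use count.
 */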
static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] =
				rcu_dereference_ovsl(ma->masks[index]);
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}