// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

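/* Allocates a zeroed flow together with its default stats node, which is
 * placed on CPU0's NUMA node when that node is online. Returns
 * ERR_PTR(-ENOMEM) on failure.
 */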
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

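/* Allocates a table instance with 'new_size' buckets and a fresh random
 * hash seed. 'new_size' is expected to be a power of two, since
 * find_bucket() selects a bucket by masking with (n_buckets - 1).
 */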
static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

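/* Allocates a mask array of at least MASK_ARRAY_SIZE_MIN slots. The
 * zero-base counters live in the same allocation, right behind the mask
 * pointers; the per-CPU usage counters are allocated separately.
 */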
static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}

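/* Replaces the mask array with one of 'size' slots, compacting the
 * surviving mask pointers to the front. The old array is freed after an
 * RCU grace period.
 */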
static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

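/* Removes 'mask' from the mask array by swapping the last live entry
 * into its slot, then shrinks the array once no more than a third of it
 * is in use.
 */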
static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

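/* The mask cache maps an skb hash to the index of the mask that matched
 * a packet with that hash last time, so the common case can skip the
 * linear scan over the mask array.
 */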
static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow 'size' to be 0 or a power of 2, and it must not
	 * exceed the per-CPU allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow,
				     bool count)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	if (count)
		table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);

		if (count)
			table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	if (ti->keep_flows)
		return;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow, false);
			ovs_flow_free(flow, true);
		}
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

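/* Buckets are chosen by rehashing the flow hash with the per-instance
 * random seed, so the bucket distribution changes with every new table
 * instance.
 */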
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

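/* Hashes only the bytes of the key selected by 'range'. Both range
 * boundaries are expected to be long-aligned by construction, so the
 * length is a whole number of u32 words.
 */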
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure number of hash bytes are multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the
 * mask from the index passed in through '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

/* mask_cache maps a flow to a probable mask. The cache is not tightly
 * coupled: updates to the mask list can leave stale entries behind in
 * the mask cache.
 * The cache is per CPU and is divided into MC_HASH_SEGS segments. In
 * case of a hash collision the entry is hashed in the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e; /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	*n_cache_hit = 0;
	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit,
			   &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow, true);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
	       && (a->range.start == b->range.start)
	       && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

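/* Comparator for sort(): orders mask_count entries by descending usage
 * counter, so the most used masks end up at the front of the array.
 */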
static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		unsigned int start;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}