net/mlx5: E-Switch, Get counters for offloaded flows from callers
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
CommitLineData
de8575e0
MG
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h>
0efc8562 35#include <linux/mlx5/eswitch.h>
de8575e0
MG
36
37#include "mlx5_core.h"
38#include "fs_core.h"
0c56b975 39#include "fs_cmd.h"
4c03e69a 40#include "diag/fs_tracepoint.h"
5f418378 41#include "accel/ipsec.h"
05564d0a 42#include "fpga/ipsec.h"
0c56b975 43
25302363
MG
44#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
45 sizeof(struct init_tree_node))
46
a257b94a 47#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
8d40d162 48 ...) {.type = FS_TYPE_PRIO,\
25302363 49 .min_ft_level = min_level_val,\
a257b94a 50 .num_levels = num_levels_val,\
4cbdd30e 51 .num_leaf_prios = num_prios_val,\
8d40d162 52 .caps = caps_val,\
25302363
MG
53 .children = (struct init_tree_node[]) {__VA_ARGS__},\
54 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
55}
56
a257b94a
MG
57#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
58 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
4cbdd30e 59 __VA_ARGS__)\
25302363
MG
60
61#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
62 .children = (struct init_tree_node[]) {__VA_ARGS__},\
63 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
64}
65
8d40d162
MG
66#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
67 sizeof(long))
68
69#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
70
71#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
72 .caps = (long[]) {__VA_ARGS__} }
73
6dc6071c
MG
74#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
75 FS_CAP(flow_table_properties_nic_receive.modify_root), \
76 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
77 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
78
8ce78257
MB
79#define FS_CHAINING_CAPS_EGRESS \
80 FS_REQUIRED_CAPS( \
81 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
82 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
83 FS_CAP(flow_table_properties_nic_transmit \
84 .identified_miss_table_mode), \
85 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
86
a257b94a 87#define LEFTOVERS_NUM_LEVELS 1
4cbdd30e 88#define LEFTOVERS_NUM_PRIOS 1
4cbdd30e 89
a257b94a 90#define BY_PASS_PRIO_NUM_LEVELS 1
6dc6071c 91#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
a257b94a
MG
92 LEFTOVERS_NUM_PRIOS)
93
6dc6071c 94#define ETHTOOL_PRIO_NUM_LEVELS 1
e5835f28 95#define ETHTOOL_NUM_PRIOS 11
6dc6071c 96#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
7b3722fa
GP
97/* Vlan, mac, ttc, inner ttc, aRFS */
98#define KERNEL_NIC_PRIO_NUM_LEVELS 5
13de6c10
MG
99#define KERNEL_NIC_NUM_PRIOS 1
100/* One more level for tc */
101#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
8d40d162 102
479f074c
OG
103#define KERNEL_NIC_TC_NUM_PRIOS 1
104#define KERNEL_NIC_TC_NUM_LEVELS 2
105
a257b94a 106#define ANCHOR_NUM_LEVELS 1
153fefbf
MG
107#define ANCHOR_NUM_PRIOS 1
108#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
acbc2004
OG
109
110#define OFFLOADS_MAX_FT 1
111#define OFFLOADS_NUM_PRIOS 1
112#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
113
3e75d4eb
AH
114#define LAG_PRIO_NUM_LEVELS 1
115#define LAG_NUM_PRIOS 1
116#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
117
8d40d162
MG
118struct node_caps {
119 size_t arr_sz;
120 long *caps;
121};
8963ca45 122
25302363
MG
/* Static description of the default NIC RX steering tree.  Each node is
 * either a priority (with optional required FW caps and a minimum flow
 * table level) or a namespace holding child priorities.  The tree is
 * instantiated at init time from this table.
 */
static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;			/* number of entries in children[] */
	struct node_caps caps;		/* FW caps required for this prio */
	int min_ft_level;		/* minimum HW flow table level */
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,			/* must match the initializer below */
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};
161
8ce78257
MB
/* Static description of the egress (NIC TX) steering tree: a single
 * bypass priority gated on the egress chaining capabilities.
 */
static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};
172
c7784b1c
MG
/* Lockdep subclasses for the per-node rwsem, used when taking the locks
 * of up to three tree levels (grandparent -> parent -> child) at once.
 */
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};
178
0d235c3f
MB
/* rhashtable parameters for the per-group FTE table, keyed by the FTE
 * match value.
 */
static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
186
693c6883
MB
/* rhltable parameters for the per-table flow group list, keyed by the
 * group match mask (several groups may share a mask, hence rhltable).
 */
static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,

};
195
bd71b08e
MG
196static void del_hw_flow_table(struct fs_node *node);
197static void del_hw_flow_group(struct fs_node *node);
198static void del_hw_fte(struct fs_node *node);
199static void del_sw_flow_table(struct fs_node *node);
200static void del_sw_flow_group(struct fs_node *node);
201static void del_sw_fte(struct fs_node *node);
139ed6c6
MG
202static void del_sw_prio(struct fs_node *node);
203static void del_sw_ns(struct fs_node *node);
bd71b08e
MG
204/* Delete rule (destination) is special case that
205 * requires to lock the FTE for all the deletion process.
206 */
207static void del_sw_hw_rule(struct fs_node *node);
814fb875
MB
208static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
209 struct mlx5_flow_destination *d2);
9c26f5f8 210static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
74491de9
MB
211static struct mlx5_flow_rule *
212find_flow_rule(struct fs_fte *fte,
213 struct mlx5_flow_destination *dest);
de8575e0
MG
214
215static void tree_init_node(struct fs_node *node,
bd71b08e
MG
216 void (*del_hw_func)(struct fs_node *),
217 void (*del_sw_func)(struct fs_node *))
de8575e0 218{
dd8e1945 219 refcount_set(&node->refcount, 1);
de8575e0
MG
220 INIT_LIST_HEAD(&node->list);
221 INIT_LIST_HEAD(&node->children);
c7784b1c 222 init_rwsem(&node->lock);
bd71b08e
MG
223 node->del_hw_func = del_hw_func;
224 node->del_sw_func = del_sw_func;
19f100fe 225 node->active = false;
de8575e0
MG
226}
227
228static void tree_add_node(struct fs_node *node, struct fs_node *parent)
229{
230 if (parent)
dd8e1945 231 refcount_inc(&parent->refcount);
de8575e0
MG
232 node->parent = parent;
233
234 /* Parent is the root */
235 if (!parent)
236 node->root = node;
237 else
238 node->root = parent->root;
239}
240
bd71b08e 241static int tree_get_node(struct fs_node *node)
de8575e0 242{
dd8e1945 243 return refcount_inc_not_zero(&node->refcount);
de8575e0
MG
244}
245
bd71b08e
MG
246static void nested_down_read_ref_node(struct fs_node *node,
247 enum fs_i_lock_class class)
de8575e0
MG
248{
249 if (node) {
bd71b08e 250 down_read_nested(&node->lock, class);
dd8e1945 251 refcount_inc(&node->refcount);
de8575e0
MG
252 }
253}
254
bd71b08e
MG
255static void nested_down_write_ref_node(struct fs_node *node,
256 enum fs_i_lock_class class)
de8575e0
MG
257{
258 if (node) {
bd71b08e 259 down_write_nested(&node->lock, class);
dd8e1945 260 refcount_inc(&node->refcount);
de8575e0
MG
261 }
262}
263
bd71b08e 264static void down_write_ref_node(struct fs_node *node)
de8575e0
MG
265{
266 if (node) {
bd71b08e 267 down_write(&node->lock);
dd8e1945 268 refcount_inc(&node->refcount);
de8575e0
MG
269 }
270}
271
bd71b08e
MG
272static void up_read_ref_node(struct fs_node *node)
273{
dd8e1945 274 refcount_dec(&node->refcount);
bd71b08e
MG
275 up_read(&node->lock);
276}
277
278static void up_write_ref_node(struct fs_node *node)
279{
dd8e1945 280 refcount_dec(&node->refcount);
bd71b08e
MG
281 up_write(&node->lock);
282}
283
de8575e0
MG
/* Drop a reference on @node.  On the last reference: destroy the HW
 * object first (del_hw_func), then unlink and free the SW state
 * (del_sw_func) under the parent's write lock, and finally release the
 * reference this node held on its parent (which may recursively free
 * the parent as well).
 */
static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		/* HW teardown happens before SW teardown so the SW state
		 * (ids, indexes) is still valid while talking to FW.
		 */
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only root namespace doesn't have parent and we just
			 * need to free its node.
			 */
			down_write_ref_node(parent_node);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node);
		} else {
			kfree(node);
		}
		/* Mark the node as gone so the parent ref is dropped below. */
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node);
}
308
/* Remove @node if the caller holds the only reference; otherwise just
 * drop the caller's reference and report -EEXIST (other users remain).
 *
 * NOTE(review): the refcount_read()/refcount_dec() pair is not atomic
 * as a whole; callers presumably serialize removal externally — verify
 * against call sites before relying on this in a new path.
 */
static int tree_remove_node(struct fs_node *node)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node);
	return 0;
}
5e1626c0
MG
318
319static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
320 unsigned int prio)
321{
322 struct fs_prio *iter_prio;
323
324 fs_for_each_prio(iter_prio, ns) {
325 if (iter_prio->prio == prio)
326 return iter_prio;
327 }
328
329 return NULL;
330}
331
693c6883 332static bool check_valid_spec(const struct mlx5_flow_spec *spec)
5e1626c0 333{
693c6883
MB
334 int i;
335
693c6883
MB
336 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
337 if (spec->match_value[i] & ~spec->match_criteria[i]) {
338 pr_warn("mlx5_core: match_value differs from match_criteria\n");
339 return false;
340 }
341
2aada6c0 342 return true;
5e1626c0 343}
0c56b975
MG
344
345static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
346{
347 struct fs_node *root;
348 struct mlx5_flow_namespace *ns;
349
350 root = node->root;
351
352 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
353 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
354 return NULL;
355 }
356
357 ns = container_of(root, struct mlx5_flow_namespace, node);
358 return container_of(ns, struct mlx5_flow_root_namespace, ns);
359}
360
a369d4ac
MG
361static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
362{
363 struct mlx5_flow_root_namespace *root = find_root(node);
364
365 if (root)
366 return root->dev->priv.steering;
367 return NULL;
368}
369
0c56b975
MG
370static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
371{
372 struct mlx5_flow_root_namespace *root = find_root(node);
373
374 if (root)
375 return root->dev;
376 return NULL;
377}
378
139ed6c6
MG
/* SW teardown callback for namespace nodes: the node is the allocation. */
static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}
383
/* SW teardown callback for priority nodes: the node is the allocation. */
static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}
388
bd71b08e 389static void del_hw_flow_table(struct fs_node *node)
0c56b975 390{
af76c501 391 struct mlx5_flow_root_namespace *root;
0c56b975
MG
392 struct mlx5_flow_table *ft;
393 struct mlx5_core_dev *dev;
0c56b975
MG
394 int err;
395
396 fs_get_obj(ft, node);
397 dev = get_dev(&ft->node);
af76c501 398 root = find_root(&ft->node);
0c56b975 399
19f100fe 400 if (node->active) {
af76c501 401 err = root->cmds->destroy_flow_table(dev, ft);
19f100fe
MG
402 if (err)
403 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
404 }
bd71b08e
MG
405}
406
407static void del_sw_flow_table(struct fs_node *node)
408{
409 struct mlx5_flow_table *ft;
410 struct fs_prio *prio;
411
412 fs_get_obj(ft, node);
413
693c6883 414 rhltable_destroy(&ft->fgs_hash);
0c56b975
MG
415 fs_get_obj(prio, ft->node.parent);
416 prio->num_ft--;
a369d4ac 417 kfree(ft);
0c56b975
MG
418}
419
bd71b08e 420static void del_sw_hw_rule(struct fs_node *node)
0c56b975 421{
af76c501 422 struct mlx5_flow_root_namespace *root;
0c56b975
MG
423 struct mlx5_flow_rule *rule;
424 struct mlx5_flow_table *ft;
425 struct mlx5_flow_group *fg;
426 struct fs_fte *fte;
bd5251db 427 int modify_mask;
0c56b975 428 struct mlx5_core_dev *dev = get_dev(node);
0c56b975 429 int err;
ae058314 430 bool update_fte = false;
0c56b975 431
0c56b975
MG
432 fs_get_obj(rule, node);
433 fs_get_obj(fte, rule->node.parent);
434 fs_get_obj(fg, fte->node.parent);
0c56b975 435 fs_get_obj(ft, fg->node.parent);
4c03e69a 436 trace_mlx5_fs_del_rule(rule);
b3638e1a
MG
437 if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
438 mutex_lock(&rule->dest_attr.ft->lock);
439 list_del(&rule->next_ft);
440 mutex_unlock(&rule->dest_attr.ft->lock);
441 }
ae058314
MB
442
443 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
444 --fte->dests_size) {
202854e9
CM
445 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
446 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
d2ec6a35 447 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
ae058314
MB
448 update_fte = true;
449 goto out;
450 }
451
d2ec6a35 452 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
60ab4584 453 --fte->dests_size) {
bd5251db 454 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
ae058314
MB
455 update_fte = true;
456 }
457out:
af76c501 458 root = find_root(&ft->node);
ae058314 459 if (update_fte && fte->dests_size) {
af76c501 460 err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
0c56b975 461 if (err)
2974ab6e
SM
462 mlx5_core_warn(dev,
463 "%s can't del rule fg id=%d fte_index=%d\n",
464 __func__, fg->id, fte->index);
0c56b975 465 }
a369d4ac 466 kfree(rule);
0c56b975
MG
467}
468
/* HW teardown callback for FTEs: delete the entry from FW if it was
 * ever written there (node->active); SW state is freed in del_sw_fte().
 */
static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(dev, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
	}
}
493
/* SW teardown callback for FTEs: unhash from the group's FTE table,
 * return the index to the group's allocator (indexes are stored
 * relative to the group's start_index), and free the cache object.
 */
static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);	/* the FTE must have been in the hash */
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}
511
/* HW teardown callback for flow groups: destroy the FW object if it was
 * ever created; SW state is freed in del_sw_flow_group().
 */
static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}
529
/* SW teardown callback for flow groups: release the FTE hash and index
 * allocator, adjust autogroup accounting on the owning table, unhash
 * the group from the table's group list, and free the cache object.
 */
static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);	/* the group must have been in the table's hash */
	kmem_cache_free(steering->fgs_cache, fg);
}
550
f5c2ff17
MG
/* Allocate an index for @fte inside @fg, hash it, and link it into the
 * group's node tree.  Returns 0 on success or a negative errno; on
 * hash-insert failure the allocated index is returned to the allocator.
 */
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	/* Index is allocated relative to the group; the absolute FTE
	 * index adds the group's start_index.
	 */
	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}
575
a369d4ac
MG
576static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
577 u32 *match_value,
f5c2ff17 578 struct mlx5_flow_act *flow_act)
0c56b975 579{
a369d4ac 580 struct mlx5_flow_steering *steering = get_steering(&ft->node);
0c56b975
MG
581 struct fs_fte *fte;
582
a369d4ac 583 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
0c56b975
MG
584 if (!fte)
585 return ERR_PTR(-ENOMEM);
586
587 memcpy(fte->val, match_value, sizeof(fte->val));
588 fte->node.type = FS_TYPE_FLOW_ENTRY;
d2ec6a35 589 fte->action = *flow_act;
0c56b975 590
bd71b08e 591 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
19f100fe
MG
592
593 return fte;
19f100fe
MG
594}
595
a369d4ac
MG
596static void dealloc_flow_group(struct mlx5_flow_steering *steering,
597 struct mlx5_flow_group *fg)
19f100fe
MG
598{
599 rhashtable_destroy(&fg->ftes_hash);
a369d4ac 600 kmem_cache_free(steering->fgs_cache, fg);
19f100fe
MG
601}
602
a369d4ac
MG
603static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
604 u8 match_criteria_enable,
19f100fe
MG
605 void *match_criteria,
606 int start_index,
607 int end_index)
0c56b975
MG
608{
609 struct mlx5_flow_group *fg;
0d235c3f
MB
610 int ret;
611
a369d4ac 612 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
0c56b975
MG
613 if (!fg)
614 return ERR_PTR(-ENOMEM);
615
0d235c3f
MB
616 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
617 if (ret) {
a369d4ac 618 kmem_cache_free(steering->fgs_cache, fg);
0d235c3f 619 return ERR_PTR(ret);
19f100fe 620}
75d1d187 621 ida_init(&fg->fte_allocator);
0c56b975
MG
622 fg->mask.match_criteria_enable = match_criteria_enable;
623 memcpy(&fg->mask.match_criteria, match_criteria,
624 sizeof(fg->mask.match_criteria));
625 fg->node.type = FS_TYPE_FLOW_GROUP;
19f100fe
MG
626 fg->start_index = start_index;
627 fg->max_ftes = end_index - start_index + 1;
628
629 return fg;
630}
631
/* Allocate a flow group and link it into @ft: hash it into the table's
 * group table and insert it into the group list after @prev.  Bumps the
 * table version so concurrent searchers notice the change.  Returns the
 * group or ERR_PTR.
 */
static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}
665
efdc810b 666static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
aaff1bea 667 enum fs_flow_table_type table_type,
c9f1b073
HHZ
668 enum fs_flow_table_op_mod op_mod,
669 u32 flags)
0c56b975
MG
670{
671 struct mlx5_flow_table *ft;
693c6883 672 int ret;
0c56b975
MG
673
674 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
675 if (!ft)
693c6883
MB
676 return ERR_PTR(-ENOMEM);
677
678 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
679 if (ret) {
680 kfree(ft);
681 return ERR_PTR(ret);
682 }
0c56b975
MG
683
684 ft->level = level;
685 ft->node.type = FS_TYPE_FLOW_TABLE;
aaff1bea 686 ft->op_mod = op_mod;
0c56b975 687 ft->type = table_type;
efdc810b 688 ft->vport = vport;
0c56b975 689 ft->max_fte = max_fte;
c9f1b073 690 ft->flags = flags;
b3638e1a
MG
691 INIT_LIST_HEAD(&ft->fwd_rules);
692 mutex_init(&ft->lock);
0c56b975
MG
693
694 return ft;
695}
696
fdb6896f
MG
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
/* Walk siblings forward or backward depending on @reverse. */
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

/* Iterate starting AFTER @pos, in the direction given by @reverse. */
#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		/* A flow table is a leaf; anything else is recursed into. */
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}
731
/* If reverse if false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	/* Climb toward the root, scanning each ancestor's siblings until
	 * a flow table is found in the requested direction.
	 */
	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}
751
752/* Assuming all the tree is locked by mutex chain lock */
753static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
754{
755 return find_closest_ft(prio, false);
756}
757
758/* Assuming all the tree is locked by mutex chain lock */
759static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
760{
761 return find_closest_ft(prio, true);
762}
763
f90edfd2
MG
/* Re-point every flow table in @prio to miss into @ft.  On failure past
 * the first table the driver and FW disagree about the chain, which is
 * unrecoverable here — hence the WARN_ON.
 */
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(dev, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}
787
788/* Connect flow tables from previous priority of prio to ft */
789static int connect_prev_fts(struct mlx5_core_dev *dev,
790 struct mlx5_flow_table *ft,
791 struct fs_prio *prio)
792{
793 struct mlx5_flow_table *prev_ft;
794
795 prev_ft = find_prev_chained_ft(prio);
796 if (prev_ft) {
797 struct fs_prio *prev_prio;
798
799 fs_get_obj(prev_prio, prev_ft->node.parent);
800 return connect_fts_in_prio(dev, prev_prio, ft);
801 }
802 return 0;
803}
804
2cc43b49
MG
/* If @ft sits at a lower level than the current root flow table, make
 * it the new root in FW — once per configured underlay QPN, or once
 * with QPN 0 when no underlay QPNs are configured.  Returns 0 when @ft
 * is not shallower than the current root (nothing to do).
 */
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
	} else {
		/* Stop at the first failing QPN; qpn/err then identify it
		 * in the warning below.
		 */
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}
843
74491de9
MB
/* Replace the destination of a single rule and push the updated FTE to
 * HW, under the FTE's write lock.  Only valid for FTEs whose action
 * forwards to a destination; returns -EINVAL otherwise.
 */
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	/* Lock the FTE so the SW copy and the HW update stay coherent. */
	down_write_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
				     modify_mask, fte);
	up_write_ref_node(&fte->node);

	return err;
}
869
74491de9
MB
/* Modify the destination of a rule in @handle.  With a NULL @old_dest
 * the handle must contain exactly one rule, which is modified directly.
 *
 * NOTE(review): in the multi-rule path @old_dest is never used beyond
 * the NULL check — the loop matches rules against @new_dest.  That
 * looks like it was meant to match @old_dest; confirm the intended
 * semantics against callers before changing it.
 */
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}
891
b3638e1a
MG
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	/* Move the pending fwd rules onto the new table's list first,
	 * then retarget each of them.
	 */
	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	/* Best effort: individual failures are logged above, and the
	 * overall operation still reports success.
	 */
	return 0;
}
921
f90edfd2
MG
922static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
923 struct fs_prio *prio)
924{
b3638e1a 925 struct mlx5_flow_table *next_ft;
f90edfd2
MG
926 int err = 0;
927
928 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
929
930 if (list_empty(&prio->node.children)) {
931 err = connect_prev_fts(dev, ft, prio);
932 if (err)
933 return err;
b3638e1a
MG
934
935 next_ft = find_next_chained_ft(prio);
936 err = connect_fwd_rules(dev, ft, next_ft);
937 if (err)
938 return err;
f90edfd2
MG
939 }
940
941 if (MLX5_CAP_FLOWTABLE(dev,
942 flow_table_properties_nic_receive.modify_root))
943 err = update_root_ft_create(ft, prio);
944 return err;
945}
946
d63cd286
MG
947static void list_add_flow_table(struct mlx5_flow_table *ft,
948 struct fs_prio *prio)
949{
950 struct list_head *prev = &prio->node.children;
951 struct mlx5_flow_table *iter;
952
953 fs_for_each_ft(iter, prio) {
954 if (iter->level > ft->level)
955 break;
956 prev = &iter->node.list;
957 }
958 list_add(&ft->node.list, prev);
959}
960
/* Allocate a flow table, create it in hardware and connect it into the
 * steering tree under the priority given by @ft_attr->prio.
 *
 * Runs fully under root->chain_lock so that the table chain (and a possible
 * root-table update done by connect_flow_table()) stays consistent.
 * Returns the new table or an ERR_PTR on failure.
 */
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	/* Each prio owns a fixed window of levels; reject overflow. */
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	/* max_fte is rounded up to a power of two (0 means "no FTEs"). */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	/* next_ft lets FW pre-wire the table-miss chaining at creation time. */
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
					    ft->type, ft->level, log_table_sz,
					    next_ft, &ft->id, ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	/* Publish the table in the prio under the prio's write lock. */
	down_write_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	/* Table exists in HW but was never linked - tear down HW object. */
	root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
1030
/* Create a normal (non-vport, non-LAG-demux) flow table in @ns. */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
1036
1037struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1038 int prio, int max_fte,
1039 u32 level, u16 vport)
1040{
b3ba5149
ES
1041 struct mlx5_flow_table_attr ft_attr = {};
1042
1043 ft_attr.max_fte = max_fte;
1044 ft_attr.level = level;
1045 ft_attr.prio = prio;
1046
57f35c93 1047 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
efdc810b
MHY
1048}
1049
b3ba5149
ES
1050struct mlx5_flow_table*
1051mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1052 int prio, u32 level)
aaff1bea 1053{
b3ba5149
ES
1054 struct mlx5_flow_table_attr ft_attr = {};
1055
1056 ft_attr.level = level;
1057 ft_attr.prio = prio;
1058 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
aaff1bea
AH
1059}
1060EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1061
b3ba5149
ES
1062struct mlx5_flow_table*
1063mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1064 int prio,
1065 int num_flow_table_entries,
1066 int max_num_groups,
1067 u32 level,
1068 u32 flags)
f0d22d18 1069{
b3ba5149 1070 struct mlx5_flow_table_attr ft_attr = {};
f0d22d18
MG
1071 struct mlx5_flow_table *ft;
1072
1073 if (max_num_groups > num_flow_table_entries)
1074 return ERR_PTR(-EINVAL);
1075
b3ba5149
ES
1076 ft_attr.max_fte = num_flow_table_entries;
1077 ft_attr.prio = prio;
1078 ft_attr.level = level;
1079 ft_attr.flags = flags;
1080
1081 ft = mlx5_create_flow_table(ns, &ft_attr);
f0d22d18
MG
1082 if (IS_ERR(ft))
1083 return ft;
1084
1085 ft->autogroup.active = true;
1086 ft->autogroup.required_groups = max_num_groups;
1087
1088 return ft;
1089}
b217ea25 1090EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
f0d22d18 1091
f0d22d18
MG
/* Create an explicit flow group in @ft from a FW-layout create_flow_group_in
 * blob. Not allowed on autogrouped tables (they manage their own groups).
 * Returns the group or an ERR_PTR.
 */
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	/* Insert into the SW tree under the table's write lock ... */
	down_write_ref_node(&ft->node);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node);
	if (IS_ERR(fg))
		return fg;

	/* ... then create the HW object; on failure drop the SW node again. */
	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		tree_put_node(&fg->node);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
1130
1131static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1132{
1133 struct mlx5_flow_rule *rule;
1134
1135 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1136 if (!rule)
1137 return NULL;
1138
b3638e1a 1139 INIT_LIST_HEAD(&rule->next_ft);
0c56b975 1140 rule->node.type = FS_TYPE_FLOW_DEST;
60ab4584
AV
1141 if (dest)
1142 memcpy(&rule->dest_attr, dest, sizeof(*dest));
0c56b975
MG
1143
1144 return rule;
1145}
1146
74491de9
MB
1147static struct mlx5_flow_handle *alloc_handle(int num_rules)
1148{
1149 struct mlx5_flow_handle *handle;
1150
acafe7e3 1151 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
74491de9
MB
1152 if (!handle)
1153 return NULL;
1154
1155 handle->num_rules = num_rules;
1156
1157 return handle;
1158}
1159
/* Undo a (possibly partial) create_flow_handle(): walk the first @i rules
 * backwards, drop one reference each, and free a rule only when its refcount
 * hits zero (rules found and reused by create_flow_handle() keep living on
 * their older handles). Finally frees the handle itself.
 */
static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			/* Last reference: unlink from the FTE and free. */
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}
1174
/* Build a handle of rules for @dest_num destinations of @fte. Each
 * destination either reuses an existing identical rule (refcount bumped) or
 * gets a freshly allocated one; *new_rule is set if at least one rule was
 * created, and *modify_mask accumulates which FTE fields FW must update.
 * Caller holds the FTE write lock.
 */
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	/* Precomputed modify-mask bits: counter list vs destination list. */
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	/* Even a dest-less rule (e.g. drop) needs one rule slot. */
	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			/* Reuse an identical existing rule when possible. */
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	/* Roll back the i rules created so far plus the handle. */
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}
1233
/* fte should not be deleted while calling this function */
/* Attach @dest_num destinations to @fte and sync the FTE to hardware:
 * a brand-new FTE is created in FW, an existing one is updated with the
 * accumulated modify mask. If no new SW rule was needed, no FW command is
 * issued at all. Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	/* All rules already existed (or allocation failed): nothing to sync. */
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(get_dev(&ft->node),
					     ft, fg, fte);
	else
		err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
					     modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	/* Bump version so concurrent lookups notice the change. */
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}
1279
19f100fe
MG
/* Carve a new flow group out of an autogrouped table for @spec's match
 * criteria. Scans the (start_index-sorted) existing groups for a free index
 * window of the computed group size. Returns the inserted group, -ENOENT if
 * the table is not autogrouped, or -ENOSPC if no window fits.
 */
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table  *ft,
						     struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		/* Window collides with this group: restart after it. */
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}
1325
/* Create the HW object for an autogrouped flow group: build the FW
 * create_flow_group_in blob from the group's SW mask/indices and issue the
 * command. Marks the group active on success. Returns 0 or -errno.
 */
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	/* Tell FW whether matching on the source e-switch owner is wanted. */
	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}
1369
814fb875
MB
1370static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1371 struct mlx5_flow_destination *d2)
1372{
1373 if (d1->type == d2->type) {
1374 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
b17f7fc1 1375 d1->vport.num == d2->vport.num) ||
814fb875
MB
1376 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1377 d1->ft == d2->ft) ||
1378 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
664000b6
YH
1379 d1->tir_num == d2->tir_num) ||
1380 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1381 d1->ft_num == d2->ft_num))
814fb875
MB
1382 return true;
1383 }
1384
1385 return false;
1386}
1387
b3638e1a
MG
1388static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1389 struct mlx5_flow_destination *dest)
1390{
1391 struct mlx5_flow_rule *rule;
1392
1393 list_for_each_entry(rule, &fte->node.children, node.list) {
814fb875
MB
1394 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1395 return rule;
b3638e1a
MG
1396 }
1397 return NULL;
1398}
1399
0d235c3f
MB
1400static bool check_conflicting_actions(u32 action1, u32 action2)
1401{
1402 u32 xored_actions = action1 ^ action2;
1403
1404 /* if one rule only wants to count, it's ok */
1405 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1406 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1407 return false;
1408
1409 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
60786f09 1410 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
96de67a7 1411 MLX5_FLOW_CONTEXT_ACTION_DECAP |
0c06897a
OG
1412 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1413 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
8da6fe2a
JL
1414 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1415 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1416 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
0d235c3f
MB
1417 return true;
1418
1419 return false;
1420}
1421
/* Verify that @flow_act may be merged into the existing @fte: the actions
 * must not conflict and a requested flow tag must match the FTE's current
 * one. Returns 0 if compatible, -EEXIST otherwise.
 */
static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	/* A flow tag is per-FTE; two rules sharing the FTE must agree on it. */
	if (flow_act->has_flow_tag &&
	    fte->action.flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->action.flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}
1441
74491de9
MB
/* Merge a new rule into an existing (locked) @fte of group @fg: OR the new
 * actions into the FTE, push destinations + FTE state to HW via
 * add_rule_fte(), and parent any freshly created rule nodes under the FTE.
 * On failure the FTE's action word is rolled back. Returns handle/ERR_PTR.
 */
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	/* NOTE(review): update_action compares against flow_act->action, not
	 * the merged value - may request a redundant (but harmless) update.
	 */
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		/* refcount == 1 means the rule is new: link it into the tree. */
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}
1476
bd5251db
AV
1477static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1478{
1479 if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1480 return !counter;
1481
1482 if (!counter)
1483 return false;
1484
ae058314 1485 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
eafa6abd 1486 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
bd5251db
AV
1487}
1488
d63cd286
MG
1489static bool dest_is_valid(struct mlx5_flow_destination *dest,
1490 u32 action,
1491 struct mlx5_flow_table *ft)
1492{
bd5251db
AV
1493 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1494 return counter_is_valid(dest->counter, action);
1495
d63cd286
MG
1496 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1497 return true;
1498
1499 if (!dest || ((dest->type ==
1500 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1501 (dest->ft->level <= ft->level)))
1502 return false;
1503 return true;
1504}
1505
46719d77
MG
/* One entry in the list of flow groups whose match criteria fit a spec. */
struct match_list {
	struct list_head list;
	struct mlx5_flow_group *g;
};

/* List head with the first entry embedded, so the common single-match case
 * needs no allocation (see build_match_list()/free_match_list()).
 */
struct match_list_head {
	struct list_head list;
	struct match_list first;
};
1515
1516static void free_match_list(struct match_list_head *head)
1517{
1518 if (!list_empty(&head->list)) {
1519 struct match_list *iter, *match_tmp;
1520
1521 list_del(&head->first.list);
bd71b08e 1522 tree_put_node(&head->first.g->node);
46719d77
MG
1523 list_for_each_entry_safe(iter, match_tmp, &head->list,
1524 list) {
bd71b08e 1525 tree_put_node(&iter->g->node);
46719d77
MG
1526 list_del(&iter->list);
1527 kfree(iter);
1528 }
1529 }
1530}
1531
/* Collect into @match_head every flow group of @ft whose match criteria
 * equal @spec's, taking a tree reference on each group. Walks the group
 * rhltable under RCU (GFP_ATOMIC allocations only, no FW commands).
 * Returns 0 or -ENOMEM (in which case the list is already freed).
 */
static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		/* First hit uses the entry embedded in @match_head. */
		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;	/* group being torn down */
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}
1574
bd71b08e
MG
1575static u64 matched_fgs_get_version(struct list_head *match_head)
1576{
1577 struct match_list *iter;
1578 u64 version = 0;
1579
1580 list_for_each_entry(iter, match_head, list)
1581 version += (u64)atomic_read(&iter->g->node.version);
1582 return version;
1583}
1584
ad9421e3
RD
/* Look up the FTE matching @match_value in group @g while holding the
 * group's ref-node lock (write lock if @take_write, read lock otherwise).
 * On success the FTE is returned referenced and write-locked; the group
 * lock is always released before returning.
 */
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	/* tree_get_node() fails if the FTE is concurrently being deleted. */
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}
1611
46719d77
MG
/* Try to place a new rule in one of the already-matching flow groups in
 * @match_head, using optimistic concurrency: a first pass looks for an
 * existing identical FTE; if table/group versions changed meanwhile the
 * lookup is retried with write locks; otherwise the pre-allocated FTE is
 * inserted into the first group with free space.
 * Returns the rule handle, -EAGAIN if the table version moved (caller
 * retries), -ENOENT if no group had room, or another ERR_PTR on failure.
 */
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version;
	int err;

	/* Allocate the FTE up front; freed again if an existing one is used. */
	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	version = matched_fgs_get_version(match_head);
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		/* Found one: merge into it and discard the spare FTE. */
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node);
		tree_put_node(&fte_tmp->node);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for case the new FTE with the
	 * same values was added while the fgs weren't locked
	 */
	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	/* No existing FTE: insert the new one into the first group with room. */
	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node);
			if (err == -ENOSPC)
				continue;	/* group full, try the next */
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node);
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node);
		tree_put_node(&fte->node);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}
1698
74491de9
MB
/* Core rule-add path: validate spec/destinations, try to reuse an existing
 * matching group/FTE under a read lock, and only fall back to creating a
 * new autogrouped flow group under the table write lock. The read->write
 * lock upgrade re-checks the table version and loops as needed.
 * Returns the rule handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)

{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	/* Anything but "not found"/"version raced" is a final answer. */
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return rule;
	}

	/* Upgrade to the table write lock before creating a new group. */
	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node);
		return rule;
	}

	/* Hand off locking: take the group lock, release the table lock. */
	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node);
	tree_put_node(&fte->node);
	tree_put_node(&g->node);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node);
	tree_put_node(&g->node);
	return ERR_PTR(err);
}
b3638e1a
MG
1800
1801static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1802{
1803 return ((ft->type == FS_FT_NIC_RX) &&
1804 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1805}
1806
74491de9
MB
/* Public rule-add entry point. A FWD_NEXT_PRIO action is translated here
 * into a concrete forward to the next chained table (under chain_lock so
 * the chain cannot move underneath us); the created rule is then registered
 * on that table's fwd_rules list so it is re-pointed if the chain changes.
 * Returns the rule handle or an ERR_PTR.
 */
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		/* Caller must not mix FWD_NEXT_PRIO with explicit dests. */
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			/* Rewrite the action into a plain forward-to-table. */
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			/* Track the rule on the target table so it follows
			 * chain changes; remember the original SW action.
			 */
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
0c56b975 1857
74491de9 1858void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
0c56b975 1859{
74491de9
MB
1860 int i;
1861
1862 for (i = handle->num_rules - 1; i >= 0; i--)
1863 tree_remove_node(&handle->rule[i]->node);
1864 kfree(handle);
0c56b975 1865}
74491de9 1866EXPORT_SYMBOL(mlx5_del_flow_rules);
0c56b975 1867
2cc43b49
MG
1868/* Assuming prio->node.children(flow tables) is sorted by level */
1869static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
1870{
1871 struct fs_prio *prio;
1872
1873 fs_get_obj(prio, ft->node.parent);
1874
1875 if (!list_is_last(&ft->node.list, &prio->node.children))
1876 return list_next_entry(ft, node.list);
1877 return find_next_chained_ft(prio);
1878}
1879
/* If @ft is the namespace's root table, re-point the root to the next
 * chained table (or clear it) before @ft is destroyed, updating FW once per
 * underlay QPN (or once with QPN 0 when the list is empty).
 * NOTE(review): FW failures are only warned about and the function always
 * returns 0 - looks intentionally best-effort; confirm before changing.
 */
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		/* No successor: the namespace is left without a root table. */
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
1922
f90edfd2
MG
/* Connect flow table from previous priority to
 * the next flow table.
 */
/* Unlink @ft from the steering chain before destruction: fix up the root
 * table if needed, and if @ft heads its priority, re-point both the
 * previous priorities' table-miss and any fwd rules to @ft's successor.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	/* Only the first table of a prio is chained to from outside. */
	fs_get_obj(prio, ft->node.parent);
	if  (!(list_first_entry(&prio->node.children,
				struct mlx5_flow_table,
				node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	/* Re-target rules that explicitly forwarded to @ft. */
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
1954
/* Destroy a flow table: disconnect it from the chain (under chain_lock)
 * and drop the tree reference; warns if other references keep it alive.
 * Returns 0 or the disconnect error.
 */
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
0c56b975 1974
/* Destroy a flow group created with mlx5_create_flow_group(); warns if
 * outstanding references (e.g. live FTEs) prevent immediate destruction.
 */
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
25302363 1981
86d722ad
MG
/* Look up the flow namespace of the given @type. FDB/sniffer types map to
 * dedicated root namespaces; EGRESS uses the egress root; every other type
 * value doubles as a priority index into the NIC RX root namespace.
 * Returns the namespace or NULL if it does not exist on this device.
 */
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	default:
		break;
	}

	if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
		root_ns = steering->egress_root_ns;
	} else { /* Must be NIC RX */
		root_ns = steering->root_ns;
		/* The enum value itself is the prio within the NIC RX root. */
		prio = type;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
25302363 2032
9b93ab98
GP
2033struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2034 enum mlx5_flow_namespace_type type,
2035 int vport)
2036{
2037 struct mlx5_flow_steering *steering = dev->priv.steering;
2038
2039 if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
2040 return NULL;
2041
2042 switch (type) {
2043 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2044 if (steering->esw_egress_root_ns &&
2045 steering->esw_egress_root_ns[vport])
2046 return &steering->esw_egress_root_ns[vport]->ns;
2047 else
2048 return NULL;
2049 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2050 if (steering->esw_ingress_root_ns &&
2051 steering->esw_ingress_root_ns[vport])
2052 return &steering->esw_ingress_root_ns[vport]->ns;
2053 else
2054 return NULL;
2055 default:
2056 return NULL;
2057 }
2058}
2059
25302363 2060static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
a257b94a 2061 unsigned int prio, int num_levels)
25302363
MG
2062{
2063 struct fs_prio *fs_prio;
2064
2065 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2066 if (!fs_prio)
2067 return ERR_PTR(-ENOMEM);
2068
2069 fs_prio->node.type = FS_TYPE_PRIO;
139ed6c6 2070 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
25302363 2071 tree_add_node(&fs_prio->node, &ns->node);
a257b94a 2072 fs_prio->num_levels = num_levels;
25302363 2073 fs_prio->prio = prio;
25302363
MG
2074 list_add_tail(&fs_prio->node.list, &ns->node.children);
2075
2076 return fs_prio;
2077}
2078
2079static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2080 *ns)
2081{
2082 ns->node.type = FS_TYPE_NAMESPACE;
2083
2084 return ns;
2085}
2086
2087static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2088{
2089 struct mlx5_flow_namespace *ns;
2090
2091 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2092 if (!ns)
2093 return ERR_PTR(-ENOMEM);
2094
2095 fs_init_namespace(ns);
139ed6c6 2096 tree_init_node(&ns->node, NULL, del_sw_ns);
25302363
MG
2097 tree_add_node(&ns->node, &prio->node);
2098 list_add_tail(&ns->node.list, &prio->node.children);
2099
2100 return ns;
2101}
2102
13de6c10
MG
2103static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2104 struct init_tree_node *prio_metadata)
4cbdd30e
MG
2105{
2106 struct fs_prio *fs_prio;
2107 int i;
2108
2109 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
13de6c10 2110 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
4cbdd30e
MG
2111 if (IS_ERR(fs_prio))
2112 return PTR_ERR(fs_prio);
2113 }
2114 return 0;
2115}
2116
8d40d162
MG
2117#define FLOW_TABLE_BIT_SZ 1
2118#define GET_FLOW_TABLE_CAP(dev, offset) \
701052c5 2119 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
8d40d162
MG
2120 offset / 32)) >> \
2121 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2122static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2123{
2124 int i;
2125
2126 for (i = 0; i < caps->arr_sz; i++) {
2127 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2128 return false;
2129 }
2130 return true;
2131}
2132
/* Recursively materialize the static init_tree_node description into
 * the SW fs_prio/namespace tree under @fs_parent_node.
 *
 * Priority nodes gated by device caps or by min_ft_level are silently
 * skipped (return 0).  @prio is the priority index to assign at the
 * current nesting level; children restart their own count at 0.
 * Returns 0 on success or a negative errno.
 */
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		/* Skip priorities the device cannot support. */
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		/* Leaf descriptors expand into several flat priorities
		 * and have no children of their own.
		 */
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	/* Children get a fresh priority count under the new base node. */
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		/* A leaf-prio child consumed num_leaf_prios slots, not 1;
		 * advance the running priority index accordingly.
		 */
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}
2183
fba53f7b 2184static int init_root_tree(struct mlx5_flow_steering *steering,
8d40d162 2185 struct init_tree_node *init_node,
25302363
MG
2186 struct fs_node *fs_parent_node)
2187{
2188 int i;
2189 struct mlx5_flow_namespace *fs_ns;
2190 int err;
2191
2192 fs_get_obj(fs_ns, fs_parent_node);
2193 for (i = 0; i < init_node->ar_size; i++) {
fba53f7b 2194 err = init_root_tree_recursive(steering, &init_node->children[i],
25302363
MG
2195 &fs_ns->node,
2196 init_node, i);
2197 if (err)
2198 return err;
2199 }
2200 return 0;
2201}
2202
af76c501
MB
2203static struct mlx5_flow_root_namespace
2204*create_root_ns(struct mlx5_flow_steering *steering,
2205 enum fs_flow_table_type table_type)
25302363 2206{
af76c501 2207 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
25302363
MG
2208 struct mlx5_flow_root_namespace *root_ns;
2209 struct mlx5_flow_namespace *ns;
2210
05564d0a
AY
2211 if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2212 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2213 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2214
86d722ad 2215 /* Create the root namespace */
1b9a07ee 2216 root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
25302363
MG
2217 if (!root_ns)
2218 return NULL;
2219
fba53f7b 2220 root_ns->dev = steering->dev;
25302363 2221 root_ns->table_type = table_type;
af76c501 2222 root_ns->cmds = cmds;
25302363 2223
dae37456
AV
2224 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2225
25302363
MG
2226 ns = &root_ns->ns;
2227 fs_init_namespace(ns);
2cc43b49 2228 mutex_init(&root_ns->chain_lock);
bd71b08e 2229 tree_init_node(&ns->node, NULL, NULL);
25302363
MG
2230 tree_add_node(&ns->node, NULL);
2231
2232 return root_ns;
2233}
2234
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

/* Assign start_level/num_levels to every priority in @ns, starting
 * the level count at @acc_level.  Returns the first level past the
 * namespace (mutually recursive with set_prio_attrs_in_prio).
 */
static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

/* Set @prio's start_level and recurse into its child namespaces.
 * Note: sibling namespaces deliberately all start at the same
 * @acc_level, so acc_level_ns ends up as the maximum level any child
 * reached.
 */
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	/* The loop above has a single-statement body; the fixup below
	 * runs once, after all child namespaces were visited.  A prio
	 * with no explicit num_levels inherits the span its children
	 * consumed.
	 */
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
2262
2263static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2264{
2265 struct mlx5_flow_namespace *ns = &root_ns->ns;
2266 struct fs_prio *prio;
2267 int start_level = 0;
2268
2269 fs_for_each_prio(prio, ns) {
2270 set_prio_attrs_in_prio(prio, start_level);
a257b94a 2271 start_level += prio->num_levels;
655227ed
MG
2272 }
2273}
2274
153fefbf
MG
2275#define ANCHOR_PRIO 0
2276#define ANCHOR_SIZE 1
d63cd286 2277#define ANCHOR_LEVEL 0
fba53f7b 2278static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
153fefbf
MG
2279{
2280 struct mlx5_flow_namespace *ns = NULL;
b3ba5149 2281 struct mlx5_flow_table_attr ft_attr = {};
153fefbf
MG
2282 struct mlx5_flow_table *ft;
2283
fba53f7b 2284 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
eff596da 2285 if (WARN_ON(!ns))
153fefbf 2286 return -EINVAL;
b3ba5149
ES
2287
2288 ft_attr.max_fte = ANCHOR_SIZE;
2289 ft_attr.level = ANCHOR_LEVEL;
2290 ft_attr.prio = ANCHOR_PRIO;
2291
2292 ft = mlx5_create_flow_table(ns, &ft_attr);
153fefbf 2293 if (IS_ERR(ft)) {
fba53f7b 2294 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
153fefbf
MG
2295 return PTR_ERR(ft);
2296 }
2297 return 0;
2298}
2299
fba53f7b 2300static int init_root_ns(struct mlx5_flow_steering *steering)
25302363 2301{
9c26f5f8
TB
2302 int err;
2303
fba53f7b 2304 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
42fb18fd 2305 if (!steering->root_ns)
9c26f5f8 2306 return -ENOMEM;
25302363 2307
9c26f5f8
TB
2308 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2309 if (err)
2310 goto out_err;
25302363 2311
fba53f7b 2312 set_prio_attrs(steering->root_ns);
9c26f5f8
TB
2313 err = create_anchor_flow_table(steering);
2314 if (err)
2315 goto out_err;
153fefbf 2316
25302363
MG
2317 return 0;
2318
9c26f5f8
TB
2319out_err:
2320 cleanup_root_ns(steering->root_ns);
2321 steering->root_ns = NULL;
2322 return err;
25302363
MG
2323}
2324
/* Recursively tear down a fs_node subtree, children first. */
static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		/* Hold an extra reference so @node stays alive while
		 * its children are being removed underneath it; the
		 * matching put precedes the final remove.
		 */
		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node);
		tree_remove_node(node);
	}
}
2338
0da2d666 2339static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
25302363 2340{
25302363
MG
2341 if (!root_ns)
2342 return;
2343
0da2d666 2344 clean_tree(&root_ns->ns.node);
25302363
MG
2345}
2346
9b93ab98
GP
2347static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
2348{
2349 struct mlx5_flow_steering *steering = dev->priv.steering;
2350 int i;
2351
2352 if (!steering->esw_egress_root_ns)
2353 return;
2354
2355 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2356 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2357
2358 kfree(steering->esw_egress_root_ns);
2359}
2360
2361static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2362{
2363 struct mlx5_flow_steering *steering = dev->priv.steering;
2364 int i;
2365
2366 if (!steering->esw_ingress_root_ns)
2367 return;
2368
2369 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2370 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2371
2372 kfree(steering->esw_ingress_root_ns);
2373}
2374
/* Tear down all flow-steering state created by mlx5_init_fs().
 * Safe against partially completed init: each cleanup helper
 * tolerates a NULL / never-initialized root.
 */
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	/* Caches are destroyed only after every fte/fg they back is gone. */
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}
2391
87d22483
MG
2392static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2393{
2394 struct fs_prio *prio;
2395
2396 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2397 if (!steering->sniffer_tx_root_ns)
2398 return -ENOMEM;
2399
2400 /* Create single prio */
2401 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2402 if (IS_ERR(prio)) {
2403 cleanup_root_ns(steering->sniffer_tx_root_ns);
2404 return PTR_ERR(prio);
2405 }
2406 return 0;
2407}
2408
2409static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2410{
2411 struct fs_prio *prio;
2412
2413 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2414 if (!steering->sniffer_rx_root_ns)
2415 return -ENOMEM;
2416
2417 /* Create single prio */
2418 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2419 if (IS_ERR(prio)) {
2420 cleanup_root_ns(steering->sniffer_rx_root_ns);
2421 return PTR_ERR(prio);
2422 }
2423 return 0;
2424}
2425
fba53f7b 2426static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
25302363
MG
2427{
2428 struct fs_prio *prio;
2429
fba53f7b
MG
2430 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2431 if (!steering->fdb_root_ns)
25302363
MG
2432 return -ENOMEM;
2433
a842dd04 2434 prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
1033665e
OG
2435 if (IS_ERR(prio))
2436 goto out_err;
2437
2438 prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
2439 if (IS_ERR(prio))
2440 goto out_err;
2441
2442 set_prio_attrs(steering->fdb_root_ns);
2443 return 0;
2444
2445out_err:
2446 cleanup_root_ns(steering->fdb_root_ns);
2447 steering->fdb_root_ns = NULL;
2448 return PTR_ERR(prio);
25302363
MG
2449}
2450
9b93ab98 2451static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
2452{
2453 struct fs_prio *prio;
2454
9b93ab98
GP
2455 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2456 if (!steering->esw_egress_root_ns[vport])
efdc810b
MHY
2457 return -ENOMEM;
2458
2459 /* create 1 prio*/
9b93ab98 2460 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
44fafdaa 2461 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
2462}
2463
9b93ab98 2464static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
2465{
2466 struct fs_prio *prio;
2467
9b93ab98
GP
2468 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2469 if (!steering->esw_ingress_root_ns[vport])
efdc810b
MHY
2470 return -ENOMEM;
2471
2472 /* create 1 prio*/
9b93ab98 2473 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
44fafdaa 2474 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
2475}
2476
9b93ab98
GP
2477static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
2478{
2479 struct mlx5_flow_steering *steering = dev->priv.steering;
2480 int err;
2481 int i;
2482
2483 steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2484 sizeof(*steering->esw_egress_root_ns),
2485 GFP_KERNEL);
2486 if (!steering->esw_egress_root_ns)
2487 return -ENOMEM;
2488
2489 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2490 err = init_egress_acl_root_ns(steering, i);
2491 if (err)
2492 goto cleanup_root_ns;
2493 }
2494
2495 return 0;
2496
2497cleanup_root_ns:
2498 for (i--; i >= 0; i--)
2499 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2500 kfree(steering->esw_egress_root_ns);
2501 return err;
2502}
2503
2504static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2505{
2506 struct mlx5_flow_steering *steering = dev->priv.steering;
2507 int err;
2508 int i;
2509
2510 steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2511 sizeof(*steering->esw_ingress_root_ns),
2512 GFP_KERNEL);
2513 if (!steering->esw_ingress_root_ns)
2514 return -ENOMEM;
2515
2516 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2517 err = init_ingress_acl_root_ns(steering, i);
2518 if (err)
2519 goto cleanup_root_ns;
2520 }
2521
2522 return 0;
2523
2524cleanup_root_ns:
2525 for (i--; i >= 0; i--)
2526 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2527 kfree(steering->esw_ingress_root_ns);
2528 return err;
2529}
2530
5f418378
AY
2531static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2532{
8ce78257 2533 int err;
5f418378
AY
2534
2535 steering->egress_root_ns = create_root_ns(steering,
2536 FS_FT_NIC_TX);
2537 if (!steering->egress_root_ns)
2538 return -ENOMEM;
2539
8ce78257
MB
2540 err = init_root_tree(steering, &egress_root_fs,
2541 &steering->egress_root_ns->ns.node);
2542 if (err)
2543 goto cleanup;
2544 set_prio_attrs(steering->egress_root_ns);
2545 return 0;
2546cleanup:
2547 cleanup_root_ns(steering->egress_root_ns);
2548 steering->egress_root_ns = NULL;
2549 return err;
5f418378
AY
2550}
2551
/* Initialize all flow-steering state for @dev: flow-counter stats,
 * the slab caches for groups/FTEs, and every capability-gated root
 * namespace (NIC RX, FDB, e-switch ACLs, sniffers, NIC TX).
 * On any failure the whole thing is unwound via mlx5_cleanup_fs().
 */
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	/* NIC RX steering: ETH ports with nic_flow_table, or IB ports
	 * with enhanced IPoIB offloads — and RX FT support either way.
	 */
	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	/* E-switch managers additionally get FDB and per-vport ACLs,
	 * each gated by its own ft_support capability.
	 */
	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	/* NIC TX steering: needed for IPsec devices or when the device
	 * reports TX flow-table support.
	 */
	if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}
/* Register @underlay_qpn with the NIC RX root flow table and remember
 * it on the root's underlay_qpns list.  Runs under chain_lock so the
 * root FT cannot change concurrently.  Returns 0 or a negative errno
 * (-EINVAL if no root FT exists yet).
 */
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	/* Allocate before taking the lock; freed on any failure path. */
	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	/* Track the QPN only after FW accepted it. */
	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
/* Unregister @underlay_qpn from the NIC RX root flow table and drop
 * its list entry.  Returns -EINVAL if the QPN was never added; a FW
 * update failure is deliberately only warned about — the entry is
 * removed from the list and 0 is returned regardless.
 */
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	/* Best effort: drop the bookkeeping entry even if FW refused. */
	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);