net/mlx5: Refactor find_flow_rule
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
CommitLineData
de8575e0
MG
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h>
35
36#include "mlx5_core.h"
37#include "fs_core.h"
0c56b975
MG
38#include "fs_cmd.h"
39
25302363
MG
40#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
41 sizeof(struct init_tree_node))
42
a257b94a 43#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
8d40d162 44 ...) {.type = FS_TYPE_PRIO,\
25302363 45 .min_ft_level = min_level_val,\
a257b94a 46 .num_levels = num_levels_val,\
4cbdd30e 47 .num_leaf_prios = num_prios_val,\
8d40d162 48 .caps = caps_val,\
25302363
MG
49 .children = (struct init_tree_node[]) {__VA_ARGS__},\
50 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
51}
52
a257b94a
MG
53#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
54 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
4cbdd30e 55 __VA_ARGS__)\
25302363
MG
56
57#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
58 .children = (struct init_tree_node[]) {__VA_ARGS__},\
59 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
60}
61
8d40d162
MG
62#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
63 sizeof(long))
64
65#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
66
67#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
68 .caps = (long[]) {__VA_ARGS__} }
69
6dc6071c
MG
70#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
71 FS_CAP(flow_table_properties_nic_receive.modify_root), \
72 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
73 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
74
a257b94a 75#define LEFTOVERS_NUM_LEVELS 1
4cbdd30e 76#define LEFTOVERS_NUM_PRIOS 1
4cbdd30e 77
a257b94a 78#define BY_PASS_PRIO_NUM_LEVELS 1
6dc6071c 79#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
a257b94a
MG
80 LEFTOVERS_NUM_PRIOS)
81
6dc6071c 82#define ETHTOOL_PRIO_NUM_LEVELS 1
e5835f28 83#define ETHTOOL_NUM_PRIOS 11
6dc6071c 84#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
1cabe6b0
MG
85/* Vlan, mac, ttc, aRFS */
86#define KERNEL_NIC_PRIO_NUM_LEVELS 4
13de6c10
MG
87#define KERNEL_NIC_NUM_PRIOS 1
88/* One more level for tc */
89#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
8d40d162 90
a257b94a 91#define ANCHOR_NUM_LEVELS 1
153fefbf
MG
92#define ANCHOR_NUM_PRIOS 1
93#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
acbc2004
OG
94
95#define OFFLOADS_MAX_FT 1
96#define OFFLOADS_NUM_PRIOS 1
97#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
98
3e75d4eb
AH
99#define LAG_PRIO_NUM_LEVELS 1
100#define LAG_NUM_PRIOS 1
101#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
102
8d40d162
MG
103struct node_caps {
104 size_t arr_sz;
105 long *caps;
106};
25302363
MG
107static struct init_tree_node {
108 enum fs_node_type type;
109 struct init_tree_node *children;
110 int ar_size;
8d40d162 111 struct node_caps caps;
25302363 112 int min_ft_level;
4cbdd30e 113 int num_leaf_prios;
25302363 114 int prio;
a257b94a 115 int num_levels;
25302363
MG
116} root_fs = {
117 .type = FS_TYPE_NAMESPACE,
3e75d4eb 118 .ar_size = 7,
25302363 119 .children = (struct init_tree_node[]) {
4cbdd30e 120 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
6dc6071c 121 FS_CHAINING_CAPS,
a257b94a
MG
122 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
123 BY_PASS_PRIO_NUM_LEVELS))),
3e75d4eb
AH
124 ADD_PRIO(0, LAG_MIN_LEVEL, 0,
125 FS_CHAINING_CAPS,
126 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
127 LAG_PRIO_NUM_LEVELS))),
acbc2004
OG
128 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
129 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
6dc6071c
MG
130 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
131 FS_CHAINING_CAPS,
132 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
133 ETHTOOL_PRIO_NUM_LEVELS))),
a257b94a 134 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
13de6c10
MG
135 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
136 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
137 KERNEL_NIC_PRIO_NUM_LEVELS))),
4cbdd30e 138 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
6dc6071c 139 FS_CHAINING_CAPS,
a257b94a 140 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
153fefbf 141 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
a257b94a 142 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
25302363
MG
143 }
144};
145
f0d22d18
MG
/* Lockdep subclasses used with mutex_lock_nested() when several fs_node
 * mutexes are held at once (taken in grandparent -> parent -> child order).
 */
enum fs_i_mutex_lock_class {
	FS_MUTEX_GRANDPARENT,
	FS_MUTEX_PARENT,
	FS_MUTEX_CHILD
};
151
0c56b975
MG
152static void del_rule(struct fs_node *node);
153static void del_flow_table(struct fs_node *node);
154static void del_flow_group(struct fs_node *node);
155static void del_fte(struct fs_node *node);
814fb875
MB
156static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
157 struct mlx5_flow_destination *d2);
de8575e0
MG
158
159static void tree_init_node(struct fs_node *node,
160 unsigned int refcount,
161 void (*remove_func)(struct fs_node *))
162{
163 atomic_set(&node->refcount, refcount);
164 INIT_LIST_HEAD(&node->list);
165 INIT_LIST_HEAD(&node->children);
166 mutex_init(&node->lock);
167 node->remove_func = remove_func;
168}
169
170static void tree_add_node(struct fs_node *node, struct fs_node *parent)
171{
172 if (parent)
173 atomic_inc(&parent->refcount);
174 node->parent = parent;
175
176 /* Parent is the root */
177 if (!parent)
178 node->root = node;
179 else
180 node->root = parent->root;
181}
182
/* Take an additional reference on @node (paired with tree_put_node()). */
static void tree_get_node(struct fs_node *node)
{
	atomic_inc(&node->refcount);
}
187
f0d22d18
MG
188static void nested_lock_ref_node(struct fs_node *node,
189 enum fs_i_mutex_lock_class class)
de8575e0
MG
190{
191 if (node) {
f0d22d18 192 mutex_lock_nested(&node->lock, class);
de8575e0
MG
193 atomic_inc(&node->refcount);
194 }
195}
196
197static void lock_ref_node(struct fs_node *node)
198{
199 if (node) {
200 mutex_lock(&node->lock);
201 atomic_inc(&node->refcount);
202 }
203}
204
205static void unlock_ref_node(struct fs_node *node)
206{
207 if (node) {
208 atomic_dec(&node->refcount);
209 mutex_unlock(&node->lock);
210 }
211}
212
/* Drop a reference on @node and free it when the last reference goes
 * away. The parent is held locked while the node is unlinked and its
 * remove_func runs; the parent reference the freed node held is then
 * released, which may cascade up the tree.
 */
static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	lock_ref_node(parent_node);
	if (atomic_dec_and_test(&node->refcount)) {
		if (parent_node)
			list_del_init(&node->list);
		if (node->remove_func)
			node->remove_func(node);
		kfree(node);
		node = NULL;	/* marks "node was freed" for the check below */
	}
	unlock_ref_node(parent_node);
	/* Release the reference the freed child held on its parent. */
	if (!node && parent_node)
		tree_put_node(parent_node);
}
230
231static int tree_remove_node(struct fs_node *node)
232{
b3638e1a
MG
233 if (atomic_read(&node->refcount) > 1) {
234 atomic_dec(&node->refcount);
235 return -EEXIST;
236 }
de8575e0
MG
237 tree_put_node(node);
238 return 0;
239}
5e1626c0
MG
240
241static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
242 unsigned int prio)
243{
244 struct fs_prio *iter_prio;
245
246 fs_for_each_prio(iter_prio, ns) {
247 if (iter_prio->prio == prio)
248 return iter_prio;
249 }
250
251 return NULL;
252}
253
5e1626c0
MG
/* Return true iff @val1 and @val2 agree on every bit that is set in
 * @mask, over @size bytes.
 *
 * Byte stepping is done through unsigned char pointers instead of the
 * original void-pointer arithmetic, which is a GNU extension rather
 * than standard C.
 */
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
	const unsigned char *m = mask;
	const unsigned char *a = val1;
	const unsigned char *b = val2;
	size_t i;

	for (i = 0; i < size; i++) {
		if ((a[i] & m[i]) != (b[i] & m[i]))
			return false;
	}

	return true;
}
265
/* Return true iff @fte_param1 and @fte_param2 are equal on every field
 * selected by the group @mask, checking only the criteria sections the
 * mask enables (outer headers, misc parameters, inner headers).
 */
static bool compare_match_value(struct mlx5_flow_group_mask *mask,
				void *fte_param1, void *fte_param2)
{
	/* Outer L2-L4 headers */
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, outer_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, outer_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, outer_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	/* Misc parameters */
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, misc_parameters);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, misc_parameters);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, misc_parameters);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			return false;
	}

	/* Inner (tunneled) L2-L4 headers */
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, inner_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, inner_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, inner_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}
	return true;
}
312
313static bool compare_match_criteria(u8 match_criteria_enable1,
314 u8 match_criteria_enable2,
315 void *mask1, void *mask2)
316{
317 return match_criteria_enable1 == match_criteria_enable2 &&
318 !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
319}
0c56b975
MG
320
321static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
322{
323 struct fs_node *root;
324 struct mlx5_flow_namespace *ns;
325
326 root = node->root;
327
328 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
329 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
330 return NULL;
331 }
332
333 ns = container_of(root, struct mlx5_flow_namespace, node);
334 return container_of(ns, struct mlx5_flow_root_namespace, ns);
335}
336
337static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
338{
339 struct mlx5_flow_root_namespace *root = find_root(node);
340
341 if (root)
342 return root->dev;
343 return NULL;
344}
345
/* Node destructor for a flow table: destroy the table in FW (warn on
 * failure - nothing else can be done on teardown) and decrement the
 * owning priority's table count.
 */
static void del_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	struct fs_prio *prio;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);

	err = mlx5_cmd_destroy_flow_table(dev, ft);
	if (err)
		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
}
362
363static void del_rule(struct fs_node *node)
364{
365 struct mlx5_flow_rule *rule;
366 struct mlx5_flow_table *ft;
367 struct mlx5_flow_group *fg;
368 struct fs_fte *fte;
369 u32 *match_value;
bd5251db 370 int modify_mask;
0c56b975
MG
371 struct mlx5_core_dev *dev = get_dev(node);
372 int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
373 int err;
374
375 match_value = mlx5_vzalloc(match_len);
376 if (!match_value) {
2974ab6e 377 mlx5_core_warn(dev, "failed to allocate inbox\n");
0c56b975
MG
378 return;
379 }
380
381 fs_get_obj(rule, node);
382 fs_get_obj(fte, rule->node.parent);
383 fs_get_obj(fg, fte->node.parent);
384 memcpy(match_value, fte->val, sizeof(fte->val));
385 fs_get_obj(ft, fg->node.parent);
386 list_del(&rule->node.list);
b3638e1a
MG
387 if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
388 mutex_lock(&rule->dest_attr.ft->lock);
389 list_del(&rule->next_ft);
390 mutex_unlock(&rule->dest_attr.ft->lock);
391 }
60ab4584
AV
392 if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
393 --fte->dests_size) {
bd5251db 394 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
0c56b975 395 err = mlx5_cmd_update_fte(dev, ft,
bd5251db
AV
396 fg->id,
397 modify_mask,
398 fte);
0c56b975 399 if (err)
2974ab6e
SM
400 mlx5_core_warn(dev,
401 "%s can't del rule fg id=%d fte_index=%d\n",
402 __func__, fg->id, fte->index);
0c56b975
MG
403 }
404 kvfree(match_value);
405}
406
/* Node destructor for a flow table entry: delete the entry in FW (warn
 * on failure), clear its status and decrement the group's entry count.
 */
static void del_fte(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	dev = get_dev(&ft->node);
	err = mlx5_cmd_delete_fte(dev, ft,
				  fte->index);
	if (err)
		mlx5_core_warn(dev,
			       "flow steering can't delete fte in index %d of flow group id %d\n",
			       fte->index, fg->id);

	fte->status = 0;
	fg->num_ftes--;
}
430
431static void del_flow_group(struct fs_node *node)
432{
433 struct mlx5_flow_group *fg;
434 struct mlx5_flow_table *ft;
435 struct mlx5_core_dev *dev;
436
437 fs_get_obj(fg, node);
438 fs_get_obj(ft, fg->node.parent);
439 dev = get_dev(&ft->node);
440
441 if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
2974ab6e
SM
442 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
443 fg->id, ft->id);
0c56b975
MG
444}
445
446static struct fs_fte *alloc_fte(u8 action,
447 u32 flow_tag,
448 u32 *match_value,
449 unsigned int index)
450{
451 struct fs_fte *fte;
452
453 fte = kzalloc(sizeof(*fte), GFP_KERNEL);
454 if (!fte)
455 return ERR_PTR(-ENOMEM);
456
457 memcpy(fte->val, match_value, sizeof(fte->val));
458 fte->node.type = FS_TYPE_FLOW_ENTRY;
459 fte->flow_tag = flow_tag;
460 fte->index = index;
461 fte->action = action;
462
463 return fte;
464}
465
466static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
467{
468 struct mlx5_flow_group *fg;
469 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
470 create_fg_in, match_criteria);
471 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
472 create_fg_in,
473 match_criteria_enable);
474 fg = kzalloc(sizeof(*fg), GFP_KERNEL);
475 if (!fg)
476 return ERR_PTR(-ENOMEM);
477
478 fg->mask.match_criteria_enable = match_criteria_enable;
479 memcpy(&fg->mask.match_criteria, match_criteria,
480 sizeof(fg->mask.match_criteria));
481 fg->node.type = FS_TYPE_FLOW_GROUP;
482 fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
483 start_flow_index);
484 fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
485 end_flow_index) - fg->start_index + 1;
486 return fg;
487}
488
efdc810b 489static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
aaff1bea
AH
490 enum fs_flow_table_type table_type,
491 enum fs_flow_table_op_mod op_mod)
0c56b975
MG
492{
493 struct mlx5_flow_table *ft;
494
495 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
496 if (!ft)
497 return NULL;
498
499 ft->level = level;
500 ft->node.type = FS_TYPE_FLOW_TABLE;
aaff1bea 501 ft->op_mod = op_mod;
0c56b975 502 ft->type = table_type;
efdc810b 503 ft->vport = vport;
0c56b975 504 ft->max_fte = max_fte;
b3638e1a
MG
505 INIT_LIST_HEAD(&ft->fwd_rules);
506 mutex_init(&ft->lock);
0c56b975
MG
507
508 return ft;
509}
510
fdb6896f
MG
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse) \
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse) \
	for (pos = list_advance_entry(pos, reverse); \
	     &pos->list != (head); \
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	/* Walk @start's siblings in the direction given by @reverse;
	 * return the first flow table found either directly or deeper
	 * in a child sub-tree.
	 */
	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}
545
546/* If reverse if false then return the first flow table in next priority of
547 * prio in the tree, else return the last flow table in the previous priority
548 * of prio in the tree.
549 */
550static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
551{
552 struct mlx5_flow_table *ft = NULL;
553 struct fs_node *curr_node;
554 struct fs_node *parent;
555
556 parent = prio->node.parent;
557 curr_node = &prio->node;
558 while (!ft && parent) {
559 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
560 curr_node = parent;
561 parent = curr_node->parent;
562 }
563 return ft;
564}
565
/* Assuming all the tree is locked by mutex chain lock */
/* First flow table in the next priority of @prio, if any. */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}
571
/* Assuming all the tree is locked by mutex chain lock */
/* Last flow table in the previous priority of @prio, if any. */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}
577
f90edfd2
MG
578static int connect_fts_in_prio(struct mlx5_core_dev *dev,
579 struct fs_prio *prio,
580 struct mlx5_flow_table *ft)
581{
582 struct mlx5_flow_table *iter;
583 int i = 0;
584 int err;
585
586 fs_for_each_ft(iter, prio) {
587 i++;
588 err = mlx5_cmd_modify_flow_table(dev,
589 iter,
590 ft);
591 if (err) {
592 mlx5_core_warn(dev, "Failed to modify flow table %d\n",
593 iter->id);
594 /* The driver is out of sync with the FW */
595 if (i > 1)
596 WARN_ON(true);
597 return err;
598 }
599 }
600 return 0;
601}
602
603/* Connect flow tables from previous priority of prio to ft */
604static int connect_prev_fts(struct mlx5_core_dev *dev,
605 struct mlx5_flow_table *ft,
606 struct fs_prio *prio)
607{
608 struct mlx5_flow_table *prev_ft;
609
610 prev_ft = find_prev_chained_ft(prio);
611 if (prev_ft) {
612 struct fs_prio *prev_prio;
613
614 fs_get_obj(prev_prio, prev_ft->node.parent);
615 return connect_fts_in_prio(dev, prev_prio, ft);
616 }
617 return 0;
618}
619
2cc43b49
MG
620static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
621 *prio)
622{
623 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
624 int min_level = INT_MAX;
625 int err;
626
627 if (root->root_ft)
628 min_level = root->root_ft->level;
629
630 if (ft->level >= min_level)
631 return 0;
632
633 err = mlx5_cmd_update_root_ft(root->dev, ft);
634 if (err)
635 mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
636 ft->id);
637 else
638 root->root_ft = ft;
639
640 return err;
641}
642
d745098c
MG
/* Replace the destination of an existing rule, both in the SW object
 * (dest_attr) and in FW via an update-FTE command. Only valid for
 * entries with the FWD_DEST action. Returns 0 or a negative errno.
 */
int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
				 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	/* Hold the FTE locked so the destination swap and the FW update
	 * appear atomic to other writers.
	 */
	lock_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	err = mlx5_cmd_update_fte(get_dev(&ft->node),
				  ft, fg->id,
				  modify_mask,
				  fte);
	unlock_ref_node(&fte->node);

	return err;
}
668
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	/* Move the forward rules onto the new table's list ... */
	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	/* ... then retarget each rule in FW. Failures are only logged;
	 * the function always returns 0 (best effort).
	 */
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}
698
f90edfd2
MG
/* Wire a newly created table @ft into the steering chain: when it is
 * the first table of @prio, retarget the previous priority's tables and
 * existing FWD rules at it; then, if FW supports modifying the root,
 * consider promoting it to root table.
 */
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}
723
d63cd286
MG
/* Insert @ft into @prio's children, keeping the list sorted by level in
 * ascending order.
 */
static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}
737
/* Create a flow table in namespace @ns at @prio/@level: allocate the SW
 * object, issue the FW create command, connect the table into the
 * steering chain and link it into the tree. Serialized under the root
 * namespace's chain_lock. Returns the table or an ERR_PTR().
 */
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							enum fs_flow_table_op_mod op_mod,
							u16 vport, int prio,
							int max_fte, u32 level)
{
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_table *ft;
	int err;
	int log_table_sz;
	struct mlx5_flow_root_namespace *root =
		find_root(&ns->node);
	struct fs_prio *fs_prio = NULL;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	level += fs_prio->start_level;
	/* Table size is rounded up to a power of two; 0 stays 0. */
	ft = alloc_flow_table(level,
			      vport,
			      max_fte ? roundup_pow_of_two(max_fte) : 0,
			      root->table_type,
			      op_mod);
	if (!ft) {
		err = -ENOMEM;
		goto unlock_root;
	}

	tree_init_node(&ft->node, 1, del_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
					 ft->level, log_table_sz, next_ft, &ft->id);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	lock_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	unlock_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
806
efdc810b
MHY
/* Public wrapper: create a normal (non-vport, op_mod NORMAL) flow table. */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       int prio, int max_fte,
					       u32 level)
{
	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
					max_fte, level);
}
814
/* Public wrapper: create a normal flow table bound to @vport. */
struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
					max_fte, level);
}
822
aaff1bea
AH
/* Public wrapper: create a LAG demux flow table (op_mod LAG_DEMUX,
 * max_fte passed as 0).
 */
struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
					       struct mlx5_flow_namespace *ns,
					       int prio, u32 level)
{
	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
					level);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
831
f0d22d18
MG
832struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
833 int prio,
834 int num_flow_table_entries,
d63cd286
MG
835 int max_num_groups,
836 u32 level)
f0d22d18
MG
837{
838 struct mlx5_flow_table *ft;
839
840 if (max_num_groups > num_flow_table_entries)
841 return ERR_PTR(-EINVAL);
842
d63cd286 843 ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
f0d22d18
MG
844 if (IS_ERR(ft))
845 return ft;
846
847 ft->autogroup.active = true;
848 ft->autogroup.required_groups = max_num_groups;
849
850 return ft;
851}
b217ea25 852EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
f0d22d18
MG
853
/* Flow table should be locked */
/* Create a flow group in FW and link it into @ft's tree. Auto-created
 * groups (@is_auto_fg) get refcount 0 so they live and die with their
 * table rather than with an explicit user reference.
 *
 * NOTE(review): @prev_fg is currently unused - the group is appended at
 * the tail of the table's child list regardless; confirm whether
 * position-aware insertion was intended for autogroups.
 */
static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
							u32 *fg_in,
							struct list_head
							*prev_fg,
							bool is_auto_fg)
{
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	fg = alloc_flow_group(fg_in);
	if (IS_ERR(fg))
		return fg;

	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		kfree(fg);
		return ERR_PTR(err);
	}

	if (ft->autogroup.active)
		ft->autogroup.num_groups++;
	/* Add node to tree */
	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, ft->node.children.prev);

	return fg;
}
888
/* Public entry: create a user-defined flow group in @ft from the
 * FW-format mailbox @fg_in. Not allowed on autogrouping tables.
 */
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_group *fg;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	lock_ref_node(&ft->node);
	fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
	unlock_ref_node(&ft->node);

	return fg;
}
903
904static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
905{
906 struct mlx5_flow_rule *rule;
907
908 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
909 if (!rule)
910 return NULL;
911
b3638e1a 912 INIT_LIST_HEAD(&rule->next_ft);
0c56b975 913 rule->node.type = FS_TYPE_FLOW_DEST;
60ab4584
AV
914 if (dest)
915 memcpy(&rule->dest_attr, dest, sizeof(*dest));
0c56b975
MG
916
917 return rule;
918}
919
/* fte should not be deleted while calling this function */
/* Attach a new rule (destination) to @fte and push the entry to FW:
 * the first destination creates the FTE, later ones update it.
 */
static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
					   struct mlx5_flow_group *fg,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_rule *rule;
	int modify_mask = 0;
	int err;

	rule = alloc_rule(dest);
	if (!rule)
		return ERR_PTR(-ENOMEM);

	fs_get_obj(ft, fg->node.parent);
	/* Add dest to dests list- we need flow tables to be in the
	 * end of the list for forward to next prio rules.
	 */
	tree_init_node(&rule->node, 1, del_rule);
	if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
		list_add(&rule->node.list, &fte->node.children);
	else
		list_add_tail(&rule->node.list, &fte->node.children);
	if (dest) {
		fte->dests_size++;

		/* Tell FW which part of the FTE an update touches. */
		modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}

	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = mlx5_cmd_create_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	else
		err = mlx5_cmd_update_fte(get_dev(&ft->node),
					  ft, fg->id, modify_mask, fte);
	if (err)
		goto free_rule;

	fte->status |= FS_FTE_STATUS_EXISTING;

	return rule;

free_rule:
	list_del(&rule->node.list);
	kfree(rule);
	if (dest)
		fte->dests_size--;
	return ERR_PTR(err);
}
971
972/* Assumed fg is locked */
973static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
974 struct list_head **prev)
975{
976 struct fs_fte *fte;
977 unsigned int start = fg->start_index;
978
979 if (prev)
980 *prev = &fg->node.children;
981
982 /* assumed list is sorted by index */
983 fs_for_each_fte(fte, fg) {
984 if (fte->index != start)
985 return start;
986 start++;
987 if (prev)
988 *prev = &fte->node.list;
989 }
990
991 return start;
992}
993
994/* prev is output, prev->next = new_fte */
995static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
996 u32 *match_value,
997 u8 action,
998 u32 flow_tag,
999 struct list_head **prev)
1000{
1001 struct fs_fte *fte;
1002 int index;
1003
1004 index = get_free_fte_index(fg, prev);
1005 fte = alloc_fte(action, flow_tag, match_value, index);
1006 if (IS_ERR(fte))
1007 return fte;
1008
1009 return fte;
1010}
1011
f0d22d18
MG
/* Create an automatically placed flow group for @match_criteria: find
 * the first gap of group_size free entries between existing groups and
 * create the group there. Only valid on autogrouping tables.
 */
static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
						u8 match_criteria_enable,
						u32 *match_criteria)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct list_head *prev = &ft->node.children;
	unsigned int candidate_index = 0;
	struct mlx5_flow_group *fg;
	void *match_criteria_addr;
	unsigned int group_size = 0;
	u32 *in;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	in = mlx5_vzalloc(inlen);
	if (!in)
		return ERR_PTR(-ENOMEM);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	/* No gap large enough anywhere in the table. */
	if (candidate_index + group_size > ft->max_fte) {
		fg = ERR_PTR(-ENOSPC);
		goto out;
	}

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
		 group_size - 1);
	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, match_criteria,
	       MLX5_ST_SZ_BYTES(fte_match_param));

	fg = create_flow_group_common(ft, in, prev, true);
out:
	kvfree(in);
	return fg;
}
1068
814fb875
MB
1069static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1070 struct mlx5_flow_destination *d2)
1071{
1072 if (d1->type == d2->type) {
1073 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1074 d1->vport_num == d2->vport_num) ||
1075 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1076 d1->ft == d2->ft) ||
1077 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1078 d1->tir_num == d2->tir_num))
1079 return true;
1080 }
1081
1082 return false;
1083}
1084
b3638e1a
MG
1085static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1086 struct mlx5_flow_destination *dest)
1087{
1088 struct mlx5_flow_rule *rule;
1089
1090 list_for_each_entry(rule, &fte->node.children, node.list) {
814fb875
MB
1091 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1092 return rule;
b3638e1a
MG
1093 }
1094 return NULL;
1095}
1096
0c56b975
MG
/* Add a rule to group @fg: if an FTE with identical match value, action
 * and flow_tag already exists, reuse it (and if the destination is
 * already attached, just take an extra reference on the existing rule);
 * otherwise allocate a new FTE at the first free index.
 * Returns the rule or an ERR_PTR (-ENOSPC when the group is full).
 */
static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
					  u32 *match_value,
					  u8 action,
					  u32 flow_tag,
					  struct mlx5_flow_destination *dest)
{
	struct fs_fte *fte;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct list_head *prev;

	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
	fs_for_each_fte(fte, fg) {
		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
		    action == fte->action && flow_tag == fte->flow_tag) {
			/* Same destination already attached: share the rule */
			rule = find_flow_rule(fte, dest);
			if (rule) {
				atomic_inc(&rule->node.refcount);
				unlock_ref_node(&fte->node);
				unlock_ref_node(&fg->node);
				return rule;
			}
			rule = add_rule_fte(fte, fg, dest);
			if (IS_ERR(rule))
				goto unlock_fte;
			else
				goto add_rule;
		}
		unlock_ref_node(&fte->node);
	}
	fs_get_obj(ft, fg->node.parent);
	if (fg->num_ftes >= fg->max_ftes) {
		rule = ERR_PTR(-ENOSPC);
		goto unlock_fg;
	}

	fte = create_fte(fg, match_value, action, flow_tag, &prev);
	if (IS_ERR(fte)) {
		rule = (void *)fte;
		goto unlock_fg;
	}
	tree_init_node(&fte->node, 0, del_fte);
	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
	rule = add_rule_fte(fte, fg, dest);
	if (IS_ERR(rule)) {
		/* NOTE(review): the freshly locked fte is kfree()d here
		 * without a matching unlock_ref_node(); it was never
		 * published on the group's list so nobody else can see
		 * it, but confirm the lock teardown is intentional.
		 */
		kfree(fte);
		goto unlock_fg;
	}

	fg->num_ftes++;

	tree_add_node(&fte->node, &fg->node);
	/* prev was set by create_fte() so the FTE list stays index-sorted */
	list_add(&fte->node.list, prev);
add_rule:
	tree_add_node(&rule->node, &fte->node);
unlock_fte:
	unlock_ref_node(&fte->node);
unlock_fg:
	unlock_ref_node(&fg->node);
	return rule;
}
1159
bd5251db
AV
1160struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
1161{
1162 struct mlx5_flow_rule *dst;
1163 struct fs_fte *fte;
1164
1165 fs_get_obj(fte, rule->node.parent);
1166
1167 fs_for_each_dst(dst, fte) {
1168 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
1169 return dst->dest_attr.counter;
1170 }
1171
1172 return NULL;
1173}
1174
1175static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1176{
1177 if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1178 return !counter;
1179
1180 if (!counter)
1181 return false;
1182
1183 /* Hardware support counter for a drop action only */
1184 return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
1185}
1186
d63cd286
MG
1187static bool dest_is_valid(struct mlx5_flow_destination *dest,
1188 u32 action,
1189 struct mlx5_flow_table *ft)
1190{
bd5251db
AV
1191 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1192 return counter_is_valid(dest->counter, action);
1193
d63cd286
MG
1194 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1195 return true;
1196
1197 if (!dest || ((dest->type ==
1198 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1199 (dest->ft->level <= ft->level)))
1200 return false;
1201 return true;
1202}
1203
b3638e1a
MG
/* Core add-rule path: find (or autogroup-create) a flow group in @ft
 * whose match criteria equal @spec's and add the rule inside it.
 * Runs with the translated HW action/destination (see mlx5_add_flow_rule()).
 */
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    u32 action,
		    u32 flow_tag,
		    struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_group *g;
	struct mlx5_flow_rule *rule;

	if (!dest_is_valid(dest, action, ft))
		return ERR_PTR(-EINVAL);

	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
	/* Try every group with matching criteria; -ENOSPC only means that
	 * particular group is full, so keep scanning.
	 */
	fs_for_each_fg(g, ft)
		if (compare_match_criteria(g->mask.match_criteria_enable,
					   spec->match_criteria_enable,
					   g->mask.match_criteria,
					   spec->match_criteria)) {
			rule = add_rule_fg(g, spec->match_value,
					   action, flow_tag, dest);
			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
				goto unlock;
		}

	/* No suitable group found: create one, if the table allows it */
	g = create_autogroup(ft, spec->match_criteria_enable,
			     spec->match_criteria);
	if (IS_ERR(g)) {
		rule = (void *)g;
		goto unlock;
	}

	rule = add_rule_fg(g, spec->match_value,
			   action, flow_tag, dest);
	if (IS_ERR(rule)) {
		/* Remove assumes refcount > 0 and autogroup creates a group
		 * with a refcount = 0.
		 */
		unlock_ref_node(&ft->node);
		tree_get_node(&g->node);
		tree_remove_node(&g->node);
		return rule;
	}
unlock:
	unlock_ref_node(&ft->node);
	return rule;
}
b3638e1a
MG
1251
1252static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1253{
1254 return ((ft->type == FS_FT_NIC_RX) &&
1255 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1256}
1257
/* Public entry point for adding a flow rule.
 * MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO is a SW-only action: it is
 * translated here into FWD_DEST targeting the first table of the next
 * chained priority, and the rule is tracked on that table's fwd_rules
 * list so connect_fwd_rules() can re-point it as tables come and go.
 */
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
		   struct mlx5_flow_spec *spec,
		   u32 action,
		   u32 flow_tag,
		   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_rule *rule = NULL;
	u32 sw_action = action;	/* remember the pre-translation action */
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (dest)
			return ERR_PTR(-EINVAL);
		/* chain_lock keeps the next-table lookup and the
		 * fwd_rules list update atomic w.r.t. table add/destroy.
		 */
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(rule) &&
		    (list_empty(&rule->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&rule->next_ft, &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return rule;
}
EXPORT_SYMBOL(mlx5_add_flow_rule);
0c56b975 1306
/* Release the caller's reference on @rule; tree_remove_node() destroys
 * the rule once the last reference is dropped.
 */
void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
{
	tree_remove_node(&rule->node);
}
EXPORT_SYMBOL(mlx5_del_flow_rule);
0c56b975 1312
2cc43b49
MG
1313/* Assuming prio->node.children(flow tables) is sorted by level */
1314static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
1315{
1316 struct fs_prio *prio;
1317
1318 fs_get_obj(prio, ft->node.parent);
1319
1320 if (!list_is_last(&ft->node.list, &prio->node.children))
1321 return list_next_entry(ft, node.list);
1322 return find_next_chained_ft(prio);
1323}
1324
/* If @ft is the current root flow table of its namespace, promote the
 * next chained table to root before @ft is destroyed.
 */
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_table *new_root_ft = NULL;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (new_root_ft) {
		int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft);

		if (err) {
			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
				       ft->id);
			return err;
		}
	}
	/* new_root_ft may still be NULL: the namespace is then rootless */
	root->root_ft = new_root_ft;
	return 0;
}
1346
f90edfd2
MG
/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	/* If @ft was the namespace root, promote its successor first */
	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	/* Only the first table of a priority is chained to from outside;
	 * for any other table there is nothing to re-route.
	 */
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	/* Re-point SW FWD_NEXT_PRIO rules that targeted @ft ... */
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	/* ... then re-point the previous priorities' table chaining */
	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
1378
86d722ad 1379int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
0c56b975 1380{
2cc43b49
MG
1381 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1382 int err = 0;
1383
1384 mutex_lock(&root->chain_lock);
f90edfd2 1385 err = disconnect_flow_table(ft);
2cc43b49
MG
1386 if (err) {
1387 mutex_unlock(&root->chain_lock);
1388 return err;
1389 }
0c56b975
MG
1390 if (tree_remove_node(&ft->node))
1391 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
1392 ft->id);
2cc43b49 1393 mutex_unlock(&root->chain_lock);
0c56b975 1394
2cc43b49 1395 return err;
0c56b975 1396}
b217ea25 1397EXPORT_SYMBOL(mlx5_destroy_flow_table);
0c56b975 1398
/* Drop the caller's reference on @fg; warn if outstanding references
 * prevent the group from actually being destroyed now.
 */
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
25302363 1405
86d722ad
MG
1406struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1407 enum mlx5_flow_namespace_type type)
25302363 1408{
fba53f7b
MG
1409 struct mlx5_flow_steering *steering = dev->priv.steering;
1410 struct mlx5_flow_root_namespace *root_ns;
25302363 1411 int prio;
78228cbd 1412 struct fs_prio *fs_prio;
25302363
MG
1413 struct mlx5_flow_namespace *ns;
1414
fba53f7b 1415 if (!steering)
25302363
MG
1416 return NULL;
1417
1418 switch (type) {
4cbdd30e 1419 case MLX5_FLOW_NAMESPACE_BYPASS:
3e75d4eb 1420 case MLX5_FLOW_NAMESPACE_LAG:
acbc2004 1421 case MLX5_FLOW_NAMESPACE_OFFLOADS:
6dc6071c 1422 case MLX5_FLOW_NAMESPACE_ETHTOOL:
25302363 1423 case MLX5_FLOW_NAMESPACE_KERNEL:
4cbdd30e 1424 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
153fefbf 1425 case MLX5_FLOW_NAMESPACE_ANCHOR:
4cbdd30e 1426 prio = type;
25302363
MG
1427 break;
1428 case MLX5_FLOW_NAMESPACE_FDB:
fba53f7b
MG
1429 if (steering->fdb_root_ns)
1430 return &steering->fdb_root_ns->ns;
25302363
MG
1431 else
1432 return NULL;
efdc810b 1433 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
fba53f7b
MG
1434 if (steering->esw_egress_root_ns)
1435 return &steering->esw_egress_root_ns->ns;
efdc810b
MHY
1436 else
1437 return NULL;
1438 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
fba53f7b
MG
1439 if (steering->esw_ingress_root_ns)
1440 return &steering->esw_ingress_root_ns->ns;
efdc810b
MHY
1441 else
1442 return NULL;
87d22483
MG
1443 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
1444 if (steering->sniffer_rx_root_ns)
1445 return &steering->sniffer_rx_root_ns->ns;
1446 else
1447 return NULL;
1448 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
1449 if (steering->sniffer_tx_root_ns)
1450 return &steering->sniffer_tx_root_ns->ns;
1451 else
1452 return NULL;
25302363
MG
1453 default:
1454 return NULL;
1455 }
1456
fba53f7b
MG
1457 root_ns = steering->root_ns;
1458 if (!root_ns)
1459 return NULL;
1460
25302363
MG
1461 fs_prio = find_prio(&root_ns->ns, prio);
1462 if (!fs_prio)
1463 return NULL;
1464
1465 ns = list_first_entry(&fs_prio->node.children,
1466 typeof(*ns),
1467 node.list);
1468
1469 return ns;
1470}
b217ea25 1471EXPORT_SYMBOL(mlx5_get_flow_namespace);
25302363
MG
1472
1473static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
a257b94a 1474 unsigned int prio, int num_levels)
25302363
MG
1475{
1476 struct fs_prio *fs_prio;
1477
1478 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
1479 if (!fs_prio)
1480 return ERR_PTR(-ENOMEM);
1481
1482 fs_prio->node.type = FS_TYPE_PRIO;
1483 tree_init_node(&fs_prio->node, 1, NULL);
1484 tree_add_node(&fs_prio->node, &ns->node);
a257b94a 1485 fs_prio->num_levels = num_levels;
25302363 1486 fs_prio->prio = prio;
25302363
MG
1487 list_add_tail(&fs_prio->node.list, &ns->node.children);
1488
1489 return fs_prio;
1490}
1491
/* Mark @ns's node as a namespace; returns @ns for call chaining */
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}
1499
1500static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
1501{
1502 struct mlx5_flow_namespace *ns;
1503
1504 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1505 if (!ns)
1506 return ERR_PTR(-ENOMEM);
1507
1508 fs_init_namespace(ns);
1509 tree_init_node(&ns->node, 1, NULL);
1510 tree_add_node(&ns->node, &prio->node);
1511 list_add_tail(&ns->node.list, &prio->node.children);
1512
1513 return ns;
1514}
1515
13de6c10
MG
1516static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
1517 struct init_tree_node *prio_metadata)
4cbdd30e
MG
1518{
1519 struct fs_prio *fs_prio;
1520 int i;
1521
1522 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
13de6c10 1523 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
4cbdd30e
MG
1524 if (IS_ERR(fs_prio))
1525 return PTR_ERR(fs_prio);
1526 }
1527 return 0;
1528}
1529
8d40d162
MG
1530#define FLOW_TABLE_BIT_SZ 1
1531#define GET_FLOW_TABLE_CAP(dev, offset) \
1532 ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
1533 offset / 32)) >> \
1534 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
1535static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
1536{
1537 int i;
1538
1539 for (i = 0; i < caps->arr_sz; i++) {
1540 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
1541 return false;
1542 }
1543 return true;
1544}
1545
/* Recursively instantiate the static init_tree_node template under
 * @fs_parent_node.  @prio is the priority index assigned at this level;
 * leaf-prio template entries expand into several consecutive priorities.
 * Priorities whose min_ft_level or capability requirements the device
 * cannot satisfy are silently skipped (return 0).
 */
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	/* children get their own prio numbering, starting from 0 */
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		/* a leaf-prio child consumed several priority indices */
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}
1596
fba53f7b 1597static int init_root_tree(struct mlx5_flow_steering *steering,
8d40d162 1598 struct init_tree_node *init_node,
25302363
MG
1599 struct fs_node *fs_parent_node)
1600{
1601 int i;
1602 struct mlx5_flow_namespace *fs_ns;
1603 int err;
1604
1605 fs_get_obj(fs_ns, fs_parent_node);
1606 for (i = 0; i < init_node->ar_size; i++) {
fba53f7b 1607 err = init_root_tree_recursive(steering, &init_node->children[i],
25302363
MG
1608 &fs_ns->node,
1609 init_node, i);
1610 if (err)
1611 return err;
1612 }
1613 return 0;
1614}
1615
fba53f7b 1616static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
25302363
MG
1617 enum fs_flow_table_type
1618 table_type)
1619{
1620 struct mlx5_flow_root_namespace *root_ns;
1621 struct mlx5_flow_namespace *ns;
1622
86d722ad 1623 /* Create the root namespace */
25302363
MG
1624 root_ns = mlx5_vzalloc(sizeof(*root_ns));
1625 if (!root_ns)
1626 return NULL;
1627
fba53f7b 1628 root_ns->dev = steering->dev;
25302363
MG
1629 root_ns->table_type = table_type;
1630
1631 ns = &root_ns->ns;
1632 fs_init_namespace(ns);
2cc43b49 1633 mutex_init(&root_ns->chain_lock);
25302363
MG
1634 tree_init_node(&ns->node, 1, NULL);
1635 tree_add_node(&ns->node, NULL);
1636
1637 return root_ns;
1638}
1639
655227ed
MG
1640static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
1641
1642static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
1643{
1644 struct fs_prio *prio;
1645
1646 fs_for_each_prio(prio, ns) {
a257b94a 1647 /* This updates prio start_level and num_levels */
655227ed 1648 set_prio_attrs_in_prio(prio, acc_level);
a257b94a 1649 acc_level += prio->num_levels;
655227ed
MG
1650 }
1651 return acc_level;
1652}
1653
1654static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
1655{
1656 struct mlx5_flow_namespace *ns;
1657 int acc_level_ns = acc_level;
1658
1659 prio->start_level = acc_level;
1660 fs_for_each_ns(ns, prio)
a257b94a 1661 /* This updates start_level and num_levels of ns's priority descendants */
655227ed 1662 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
a257b94a
MG
1663 if (!prio->num_levels)
1664 prio->num_levels = acc_level_ns - prio->start_level;
1665 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
655227ed
MG
1666}
1667
1668static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
1669{
1670 struct mlx5_flow_namespace *ns = &root_ns->ns;
1671 struct fs_prio *prio;
1672 int start_level = 0;
1673
1674 fs_for_each_prio(prio, ns) {
1675 set_prio_attrs_in_prio(prio, start_level);
a257b94a 1676 start_level += prio->num_levels;
655227ed
MG
1677 }
1678}
1679
153fefbf
MG
1680#define ANCHOR_PRIO 0
1681#define ANCHOR_SIZE 1
d63cd286 1682#define ANCHOR_LEVEL 0
fba53f7b 1683static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
153fefbf
MG
1684{
1685 struct mlx5_flow_namespace *ns = NULL;
1686 struct mlx5_flow_table *ft;
1687
fba53f7b 1688 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
153fefbf
MG
1689 if (!ns)
1690 return -EINVAL;
d63cd286 1691 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
153fefbf 1692 if (IS_ERR(ft)) {
fba53f7b 1693 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
153fefbf
MG
1694 return PTR_ERR(ft);
1695 }
1696 return 0;
1697}
1698
/* Build the NIC RX root namespace from the static root_fs template,
 * assign levels to all priorities and create the terminating anchor
 * table.  On any failure the whole flow steering state is torn down and
 * -ENOMEM is returned (all failure causes are reported as -ENOMEM).
 */
static int init_root_ns(struct mlx5_flow_steering *steering)
{

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (IS_ERR_OR_NULL(steering->root_ns))
		goto cleanup;

	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
		goto cleanup;

	set_prio_attrs(steering->root_ns);

	if (create_anchor_flow_table(steering))
		goto cleanup;

	return 0;

cleanup:
	mlx5_cleanup_fs(steering->dev);
	return -ENOMEM;
}
1720
0da2d666 1721static void clean_tree(struct fs_node *node)
25302363 1722{
0da2d666
MG
1723 if (node) {
1724 struct fs_node *iter;
1725 struct fs_node *temp;
25302363 1726
0da2d666
MG
1727 list_for_each_entry_safe(iter, temp, &node->children, list)
1728 clean_tree(iter);
1729 tree_remove_node(node);
25302363 1730 }
153fefbf
MG
1731}
1732
0da2d666 1733static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
25302363 1734{
25302363
MG
1735 if (!root_ns)
1736 return;
1737
0da2d666 1738 clean_tree(&root_ns->ns.node);
25302363
MG
1739}
1740
/* Tear down all flow steering namespaces for @dev and free the
 * steering context.  Counterpart of mlx5_init_fs().
 */
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	/* Flow steering is only set up on ETH ports (see mlx5_init_fs()) */
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->esw_egress_root_ns);
	cleanup_root_ns(steering->esw_ingress_root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kfree(steering);
}
1757
87d22483
MG
1758static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
1759{
1760 struct fs_prio *prio;
1761
1762 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
1763 if (!steering->sniffer_tx_root_ns)
1764 return -ENOMEM;
1765
1766 /* Create single prio */
1767 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
1768 if (IS_ERR(prio)) {
1769 cleanup_root_ns(steering->sniffer_tx_root_ns);
1770 return PTR_ERR(prio);
1771 }
1772 return 0;
1773}
1774
1775static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
1776{
1777 struct fs_prio *prio;
1778
1779 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
1780 if (!steering->sniffer_rx_root_ns)
1781 return -ENOMEM;
1782
1783 /* Create single prio */
1784 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
1785 if (IS_ERR(prio)) {
1786 cleanup_root_ns(steering->sniffer_rx_root_ns);
1787 return PTR_ERR(prio);
1788 }
1789 return 0;
1790}
1791
fba53f7b 1792static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
25302363
MG
1793{
1794 struct fs_prio *prio;
1795
fba53f7b
MG
1796 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
1797 if (!steering->fdb_root_ns)
25302363
MG
1798 return -ENOMEM;
1799
fba53f7b 1800 prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
1033665e
OG
1801 if (IS_ERR(prio))
1802 goto out_err;
1803
1804 prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
1805 if (IS_ERR(prio))
1806 goto out_err;
1807
1808 set_prio_attrs(steering->fdb_root_ns);
1809 return 0;
1810
1811out_err:
1812 cleanup_root_ns(steering->fdb_root_ns);
1813 steering->fdb_root_ns = NULL;
1814 return PTR_ERR(prio);
25302363
MG
1815}
1816
/* NOTE(review): despite the "ingress" name, this helper creates the
 * e-switch *egress* ACL namespace (FS_FT_ESW_EGRESS_ACL) -- it appears
 * swapped with init_egress_acl_root_ns(); check the cap/helper pairing
 * in mlx5_init_fs() before renaming anything.
 */
static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}
1830
/* NOTE(review): despite the "egress" name, this helper creates the
 * e-switch *ingress* ACL namespace (FS_FT_ESW_INGRESS_ACL) -- it
 * appears swapped with init_ingress_acl_root_ns(); check the
 * cap/helper pairing in mlx5_init_fs() before renaming anything.
 */
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}
1844
25302363
MG
/* Initialize flow steering for @dev: allocate the steering context and
 * create each root namespace the device capabilities allow.  Returns 0
 * on success; on failure the partially-built state is released via
 * mlx5_cleanup_fs().
 */
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	/* Flow steering is only supported on ETH ports */
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		/* NOTE(review): fc_stats are not released on this path --
		 * looks like a leak; confirm.
		 */
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	if (MLX5_CAP_GEN(dev, nic_flow_table) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			/* NOTE(review): init_egress_acl_root_ns() actually
			 * creates the esw_ingress namespace and vice versa;
			 * verify the cap/helper pairing is intended.
			 */
			err = init_egress_acl_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acl_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}