net/mlx5: Move header encap type to IFC header file
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
CommitLineData
de8575e0
MG
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h>
0efc8562 35#include <linux/mlx5/eswitch.h>
de8575e0
MG
36
37#include "mlx5_core.h"
38#include "fs_core.h"
0c56b975 39#include "fs_cmd.h"
4c03e69a 40#include "diag/fs_tracepoint.h"
5f418378 41#include "accel/ipsec.h"
05564d0a 42#include "fpga/ipsec.h"
0c56b975 43
25302363
MG
44#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
45 sizeof(struct init_tree_node))
46
a257b94a 47#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
8d40d162 48 ...) {.type = FS_TYPE_PRIO,\
25302363 49 .min_ft_level = min_level_val,\
a257b94a 50 .num_levels = num_levels_val,\
4cbdd30e 51 .num_leaf_prios = num_prios_val,\
8d40d162 52 .caps = caps_val,\
25302363
MG
53 .children = (struct init_tree_node[]) {__VA_ARGS__},\
54 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
55}
56
a257b94a
MG
57#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
58 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
4cbdd30e 59 __VA_ARGS__)\
25302363
MG
60
61#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
62 .children = (struct init_tree_node[]) {__VA_ARGS__},\
63 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
64}
65
8d40d162
MG
66#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
67 sizeof(long))
68
69#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
70
71#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
72 .caps = (long[]) {__VA_ARGS__} }
73
6dc6071c
MG
74#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
75 FS_CAP(flow_table_properties_nic_receive.modify_root), \
76 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
77 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
78
8ce78257
MB
79#define FS_CHAINING_CAPS_EGRESS \
80 FS_REQUIRED_CAPS( \
81 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
82 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
83 FS_CAP(flow_table_properties_nic_transmit \
84 .identified_miss_table_mode), \
85 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
86
a257b94a 87#define LEFTOVERS_NUM_LEVELS 1
4cbdd30e 88#define LEFTOVERS_NUM_PRIOS 1
4cbdd30e 89
a257b94a 90#define BY_PASS_PRIO_NUM_LEVELS 1
6dc6071c 91#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
a257b94a
MG
92 LEFTOVERS_NUM_PRIOS)
93
6dc6071c 94#define ETHTOOL_PRIO_NUM_LEVELS 1
e5835f28 95#define ETHTOOL_NUM_PRIOS 11
6dc6071c 96#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
7b3722fa
GP
97/* Vlan, mac, ttc, inner ttc, aRFS */
98#define KERNEL_NIC_PRIO_NUM_LEVELS 5
13de6c10
MG
99#define KERNEL_NIC_NUM_PRIOS 1
100/* One more level for tc */
101#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
8d40d162 102
479f074c
OG
103#define KERNEL_NIC_TC_NUM_PRIOS 1
104#define KERNEL_NIC_TC_NUM_LEVELS 2
105
a257b94a 106#define ANCHOR_NUM_LEVELS 1
153fefbf
MG
107#define ANCHOR_NUM_PRIOS 1
108#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
acbc2004
OG
109
110#define OFFLOADS_MAX_FT 1
111#define OFFLOADS_NUM_PRIOS 1
112#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
113
3e75d4eb
AH
114#define LAG_PRIO_NUM_LEVELS 1
115#define LAG_NUM_PRIOS 1
116#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
117
8d40d162
MG
118struct node_caps {
119 size_t arr_sz;
120 long *caps;
121};
8963ca45 122
25302363
MG
123static struct init_tree_node {
124 enum fs_node_type type;
125 struct init_tree_node *children;
126 int ar_size;
8d40d162 127 struct node_caps caps;
25302363 128 int min_ft_level;
4cbdd30e 129 int num_leaf_prios;
25302363 130 int prio;
a257b94a 131 int num_levels;
25302363
MG
132} root_fs = {
133 .type = FS_TYPE_NAMESPACE,
3e75d4eb 134 .ar_size = 7,
25302363 135 .children = (struct init_tree_node[]) {
4cbdd30e 136 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
6dc6071c 137 FS_CHAINING_CAPS,
a257b94a
MG
138 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
139 BY_PASS_PRIO_NUM_LEVELS))),
3e75d4eb
AH
140 ADD_PRIO(0, LAG_MIN_LEVEL, 0,
141 FS_CHAINING_CAPS,
142 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
143 LAG_PRIO_NUM_LEVELS))),
acbc2004
OG
144 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
145 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
6dc6071c
MG
146 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
147 FS_CHAINING_CAPS,
148 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
149 ETHTOOL_PRIO_NUM_LEVELS))),
a257b94a 150 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
479f074c 151 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
13de6c10
MG
152 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
153 KERNEL_NIC_PRIO_NUM_LEVELS))),
4cbdd30e 154 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
6dc6071c 155 FS_CHAINING_CAPS,
a257b94a 156 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
153fefbf 157 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
a257b94a 158 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
25302363
MG
159 }
160};
161
8ce78257
MB
162static struct init_tree_node egress_root_fs = {
163 .type = FS_TYPE_NAMESPACE,
164 .ar_size = 1,
165 .children = (struct init_tree_node[]) {
166 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
167 FS_CHAINING_CAPS_EGRESS,
168 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
169 BY_PASS_PRIO_NUM_LEVELS))),
170 }
171};
172
c7784b1c
MG
173enum fs_i_lock_class {
174 FS_LOCK_GRANDPARENT,
175 FS_LOCK_PARENT,
176 FS_LOCK_CHILD
f0d22d18
MG
177};
178
0d235c3f
MB
179static const struct rhashtable_params rhash_fte = {
180 .key_len = FIELD_SIZEOF(struct fs_fte, val),
181 .key_offset = offsetof(struct fs_fte, val),
182 .head_offset = offsetof(struct fs_fte, hash),
183 .automatic_shrinking = true,
184 .min_size = 1,
185};
186
693c6883
MB
187static const struct rhashtable_params rhash_fg = {
188 .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
189 .key_offset = offsetof(struct mlx5_flow_group, mask),
190 .head_offset = offsetof(struct mlx5_flow_group, hash),
191 .automatic_shrinking = true,
192 .min_size = 1,
193
194};
195
bd71b08e
MG
196static void del_hw_flow_table(struct fs_node *node);
197static void del_hw_flow_group(struct fs_node *node);
198static void del_hw_fte(struct fs_node *node);
199static void del_sw_flow_table(struct fs_node *node);
200static void del_sw_flow_group(struct fs_node *node);
201static void del_sw_fte(struct fs_node *node);
139ed6c6
MG
202static void del_sw_prio(struct fs_node *node);
203static void del_sw_ns(struct fs_node *node);
bd71b08e
MG
204/* Delete rule (destination) is special case that
205 * requires to lock the FTE for all the deletion process.
206 */
207static void del_sw_hw_rule(struct fs_node *node);
814fb875
MB
208static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
209 struct mlx5_flow_destination *d2);
9c26f5f8 210static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
74491de9
MB
211static struct mlx5_flow_rule *
212find_flow_rule(struct fs_fte *fte,
213 struct mlx5_flow_destination *dest);
de8575e0
MG
214
215static void tree_init_node(struct fs_node *node,
bd71b08e
MG
216 void (*del_hw_func)(struct fs_node *),
217 void (*del_sw_func)(struct fs_node *))
de8575e0 218{
dd8e1945 219 refcount_set(&node->refcount, 1);
de8575e0
MG
220 INIT_LIST_HEAD(&node->list);
221 INIT_LIST_HEAD(&node->children);
c7784b1c 222 init_rwsem(&node->lock);
bd71b08e
MG
223 node->del_hw_func = del_hw_func;
224 node->del_sw_func = del_sw_func;
19f100fe 225 node->active = false;
de8575e0
MG
226}
227
228static void tree_add_node(struct fs_node *node, struct fs_node *parent)
229{
230 if (parent)
dd8e1945 231 refcount_inc(&parent->refcount);
de8575e0
MG
232 node->parent = parent;
233
234 /* Parent is the root */
235 if (!parent)
236 node->root = node;
237 else
238 node->root = parent->root;
239}
240
bd71b08e 241static int tree_get_node(struct fs_node *node)
de8575e0 242{
dd8e1945 243 return refcount_inc_not_zero(&node->refcount);
de8575e0
MG
244}
245
bd71b08e
MG
246static void nested_down_read_ref_node(struct fs_node *node,
247 enum fs_i_lock_class class)
de8575e0
MG
248{
249 if (node) {
bd71b08e 250 down_read_nested(&node->lock, class);
dd8e1945 251 refcount_inc(&node->refcount);
de8575e0
MG
252 }
253}
254
bd71b08e
MG
255static void nested_down_write_ref_node(struct fs_node *node,
256 enum fs_i_lock_class class)
de8575e0
MG
257{
258 if (node) {
bd71b08e 259 down_write_nested(&node->lock, class);
dd8e1945 260 refcount_inc(&node->refcount);
de8575e0
MG
261 }
262}
263
bd71b08e 264static void down_write_ref_node(struct fs_node *node)
de8575e0
MG
265{
266 if (node) {
bd71b08e 267 down_write(&node->lock);
dd8e1945 268 refcount_inc(&node->refcount);
de8575e0
MG
269 }
270}
271
bd71b08e
MG
272static void up_read_ref_node(struct fs_node *node)
273{
dd8e1945 274 refcount_dec(&node->refcount);
bd71b08e
MG
275 up_read(&node->lock);
276}
277
278static void up_write_ref_node(struct fs_node *node)
279{
dd8e1945 280 refcount_dec(&node->refcount);
bd71b08e
MG
281 up_write(&node->lock);
282}
283
de8575e0
MG
284static void tree_put_node(struct fs_node *node)
285{
286 struct fs_node *parent_node = node->parent;
287
dd8e1945 288 if (refcount_dec_and_test(&node->refcount)) {
bd71b08e
MG
289 if (node->del_hw_func)
290 node->del_hw_func(node);
291 if (parent_node) {
292 /* Only root namespace doesn't have parent and we just
293 * need to free its node.
294 */
295 down_write_ref_node(parent_node);
de8575e0 296 list_del_init(&node->list);
bd71b08e
MG
297 if (node->del_sw_func)
298 node->del_sw_func(node);
299 up_write_ref_node(parent_node);
a369d4ac
MG
300 } else {
301 kfree(node);
bd71b08e 302 }
de8575e0
MG
303 node = NULL;
304 }
de8575e0
MG
305 if (!node && parent_node)
306 tree_put_node(parent_node);
307}
308
309static int tree_remove_node(struct fs_node *node)
310{
dd8e1945
ER
311 if (refcount_read(&node->refcount) > 1) {
312 refcount_dec(&node->refcount);
b3638e1a
MG
313 return -EEXIST;
314 }
de8575e0
MG
315 tree_put_node(node);
316 return 0;
317}
5e1626c0
MG
318
319static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
320 unsigned int prio)
321{
322 struct fs_prio *iter_prio;
323
324 fs_for_each_prio(iter_prio, ns) {
325 if (iter_prio->prio == prio)
326 return iter_prio;
327 }
328
329 return NULL;
330}
331
693c6883 332static bool check_valid_spec(const struct mlx5_flow_spec *spec)
5e1626c0 333{
693c6883
MB
334 int i;
335
693c6883
MB
336 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
337 if (spec->match_value[i] & ~spec->match_criteria[i]) {
338 pr_warn("mlx5_core: match_value differs from match_criteria\n");
339 return false;
340 }
341
2aada6c0 342 return true;
5e1626c0 343}
0c56b975
MG
344
345static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
346{
347 struct fs_node *root;
348 struct mlx5_flow_namespace *ns;
349
350 root = node->root;
351
352 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
353 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
354 return NULL;
355 }
356
357 ns = container_of(root, struct mlx5_flow_namespace, node);
358 return container_of(ns, struct mlx5_flow_root_namespace, ns);
359}
360
a369d4ac
MG
361static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
362{
363 struct mlx5_flow_root_namespace *root = find_root(node);
364
365 if (root)
366 return root->dev->priv.steering;
367 return NULL;
368}
369
0c56b975
MG
370static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
371{
372 struct mlx5_flow_root_namespace *root = find_root(node);
373
374 if (root)
375 return root->dev;
376 return NULL;
377}
378
139ed6c6
MG
/* SW teardown for a namespace node: nothing to undo, just free it. */
static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}
383
/* SW teardown for a priority node: nothing to undo, just free it. */
static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}
388
bd71b08e 389static void del_hw_flow_table(struct fs_node *node)
0c56b975 390{
af76c501 391 struct mlx5_flow_root_namespace *root;
0c56b975
MG
392 struct mlx5_flow_table *ft;
393 struct mlx5_core_dev *dev;
0c56b975
MG
394 int err;
395
396 fs_get_obj(ft, node);
397 dev = get_dev(&ft->node);
af76c501 398 root = find_root(&ft->node);
0c56b975 399
19f100fe 400 if (node->active) {
af76c501 401 err = root->cmds->destroy_flow_table(dev, ft);
19f100fe
MG
402 if (err)
403 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
404 }
bd71b08e
MG
405}
406
407static void del_sw_flow_table(struct fs_node *node)
408{
409 struct mlx5_flow_table *ft;
410 struct fs_prio *prio;
411
412 fs_get_obj(ft, node);
413
693c6883 414 rhltable_destroy(&ft->fgs_hash);
0c56b975
MG
415 fs_get_obj(prio, ft->node.parent);
416 prio->num_ft--;
a369d4ac 417 kfree(ft);
0c56b975
MG
418}
419
bd71b08e 420static void del_sw_hw_rule(struct fs_node *node)
0c56b975 421{
af76c501 422 struct mlx5_flow_root_namespace *root;
0c56b975
MG
423 struct mlx5_flow_rule *rule;
424 struct mlx5_flow_table *ft;
425 struct mlx5_flow_group *fg;
426 struct fs_fte *fte;
bd5251db 427 int modify_mask;
0c56b975 428 struct mlx5_core_dev *dev = get_dev(node);
0c56b975 429 int err;
ae058314 430 bool update_fte = false;
0c56b975 431
0c56b975
MG
432 fs_get_obj(rule, node);
433 fs_get_obj(fte, rule->node.parent);
434 fs_get_obj(fg, fte->node.parent);
0c56b975 435 fs_get_obj(ft, fg->node.parent);
4c03e69a 436 trace_mlx5_fs_del_rule(rule);
b3638e1a
MG
437 if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
438 mutex_lock(&rule->dest_attr.ft->lock);
439 list_del(&rule->next_ft);
440 mutex_unlock(&rule->dest_attr.ft->lock);
441 }
ae058314
MB
442
443 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
444 --fte->dests_size) {
202854e9
CM
445 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
446 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
d2ec6a35 447 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
ae058314
MB
448 update_fte = true;
449 goto out;
450 }
451
d2ec6a35 452 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
60ab4584 453 --fte->dests_size) {
bd5251db 454 modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
ae058314
MB
455 update_fte = true;
456 }
457out:
af76c501 458 root = find_root(&ft->node);
ae058314 459 if (update_fte && fte->dests_size) {
af76c501 460 err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
0c56b975 461 if (err)
2974ab6e
SM
462 mlx5_core_warn(dev,
463 "%s can't del rule fg id=%d fte_index=%d\n",
464 __func__, fg->id, fte->index);
0c56b975 465 }
a369d4ac 466 kfree(rule);
0c56b975
MG
467}
468
bd71b08e 469static void del_hw_fte(struct fs_node *node)
0c56b975 470{
af76c501 471 struct mlx5_flow_root_namespace *root;
0c56b975
MG
472 struct mlx5_flow_table *ft;
473 struct mlx5_flow_group *fg;
474 struct mlx5_core_dev *dev;
475 struct fs_fte *fte;
476 int err;
477
478 fs_get_obj(fte, node);
479 fs_get_obj(fg, fte->node.parent);
480 fs_get_obj(ft, fg->node.parent);
481
bd71b08e 482 trace_mlx5_fs_del_fte(fte);
0c56b975 483 dev = get_dev(&ft->node);
af76c501 484 root = find_root(&ft->node);
19f100fe 485 if (node->active) {
e810bf5e 486 err = root->cmds->delete_fte(dev, ft, fte);
19f100fe
MG
487 if (err)
488 mlx5_core_warn(dev,
489 "flow steering can't delete fte in index %d of flow group id %d\n",
490 fte->index, fg->id);
491 }
bd71b08e
MG
492}
493
494static void del_sw_fte(struct fs_node *node)
495{
a369d4ac 496 struct mlx5_flow_steering *steering = get_steering(node);
bd71b08e
MG
497 struct mlx5_flow_group *fg;
498 struct fs_fte *fte;
499 int err;
500
501 fs_get_obj(fte, node);
502 fs_get_obj(fg, fte->node.parent);
0c56b975 503
19f100fe
MG
504 err = rhashtable_remove_fast(&fg->ftes_hash,
505 &fte->hash,
506 rhash_fte);
507 WARN_ON(err);
508 ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
a369d4ac 509 kmem_cache_free(steering->ftes_cache, fte);
0c56b975
MG
510}
511
bd71b08e 512static void del_hw_flow_group(struct fs_node *node)
0c56b975 513{
af76c501 514 struct mlx5_flow_root_namespace *root;
0c56b975
MG
515 struct mlx5_flow_group *fg;
516 struct mlx5_flow_table *ft;
517 struct mlx5_core_dev *dev;
518
519 fs_get_obj(fg, node);
520 fs_get_obj(ft, fg->node.parent);
521 dev = get_dev(&ft->node);
4c03e69a 522 trace_mlx5_fs_del_fg(fg);
0c56b975 523
af76c501
MB
524 root = find_root(&ft->node);
525 if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
bd71b08e
MG
526 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
527 fg->id, ft->id);
528}
529
530static void del_sw_flow_group(struct fs_node *node)
531{
a369d4ac 532 struct mlx5_flow_steering *steering = get_steering(node);
bd71b08e
MG
533 struct mlx5_flow_group *fg;
534 struct mlx5_flow_table *ft;
535 int err;
536
537 fs_get_obj(fg, node);
538 fs_get_obj(ft, fg->node.parent);
32dba76a 539
0d235c3f 540 rhashtable_destroy(&fg->ftes_hash);
75d1d187 541 ida_destroy(&fg->fte_allocator);
bd71b08e
MG
542 if (ft->autogroup.active)
543 ft->autogroup.num_groups--;
693c6883
MB
544 err = rhltable_remove(&ft->fgs_hash,
545 &fg->hash,
546 rhash_fg);
547 WARN_ON(err);
a369d4ac 548 kmem_cache_free(steering->fgs_cache, fg);
0c56b975
MG
549}
550
f5c2ff17
MG
551static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
552{
553 int index;
554 int ret;
555
556 index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
557 if (index < 0)
558 return index;
559
560 fte->index = index + fg->start_index;
561 ret = rhashtable_insert_fast(&fg->ftes_hash,
562 &fte->hash,
563 rhash_fte);
564 if (ret)
565 goto err_ida_remove;
566
567 tree_add_node(&fte->node, &fg->node);
568 list_add_tail(&fte->node.list, &fg->node.children);
569 return 0;
570
571err_ida_remove:
572 ida_simple_remove(&fg->fte_allocator, index);
573 return ret;
574}
575
a369d4ac
MG
576static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
577 u32 *match_value,
f5c2ff17 578 struct mlx5_flow_act *flow_act)
0c56b975 579{
a369d4ac 580 struct mlx5_flow_steering *steering = get_steering(&ft->node);
0c56b975
MG
581 struct fs_fte *fte;
582
a369d4ac 583 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
0c56b975
MG
584 if (!fte)
585 return ERR_PTR(-ENOMEM);
586
587 memcpy(fte->val, match_value, sizeof(fte->val));
588 fte->node.type = FS_TYPE_FLOW_ENTRY;
d2ec6a35 589 fte->action = *flow_act;
0c56b975 590
bd71b08e 591 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
19f100fe
MG
592
593 return fte;
19f100fe
MG
594}
595
a369d4ac
MG
596static void dealloc_flow_group(struct mlx5_flow_steering *steering,
597 struct mlx5_flow_group *fg)
19f100fe
MG
598{
599 rhashtable_destroy(&fg->ftes_hash);
a369d4ac 600 kmem_cache_free(steering->fgs_cache, fg);
19f100fe
MG
601}
602
a369d4ac
MG
603static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
604 u8 match_criteria_enable,
19f100fe
MG
605 void *match_criteria,
606 int start_index,
607 int end_index)
0c56b975
MG
608{
609 struct mlx5_flow_group *fg;
0d235c3f
MB
610 int ret;
611
a369d4ac 612 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
0c56b975
MG
613 if (!fg)
614 return ERR_PTR(-ENOMEM);
615
0d235c3f
MB
616 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
617 if (ret) {
a369d4ac 618 kmem_cache_free(steering->fgs_cache, fg);
0d235c3f 619 return ERR_PTR(ret);
19f100fe 620}
75d1d187 621 ida_init(&fg->fte_allocator);
0c56b975
MG
622 fg->mask.match_criteria_enable = match_criteria_enable;
623 memcpy(&fg->mask.match_criteria, match_criteria,
624 sizeof(fg->mask.match_criteria));
625 fg->node.type = FS_TYPE_FLOW_GROUP;
19f100fe
MG
626 fg->start_index = start_index;
627 fg->max_ftes = end_index - start_index + 1;
628
629 return fg;
630}
631
632static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
633 u8 match_criteria_enable,
634 void *match_criteria,
635 int start_index,
636 int end_index,
637 struct list_head *prev)
638{
a369d4ac 639 struct mlx5_flow_steering *steering = get_steering(&ft->node);
19f100fe
MG
640 struct mlx5_flow_group *fg;
641 int ret;
642
a369d4ac 643 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
19f100fe
MG
644 start_index, end_index);
645 if (IS_ERR(fg))
646 return fg;
647
648 /* initialize refcnt, add to parent list */
649 ret = rhltable_insert(&ft->fgs_hash,
650 &fg->hash,
651 rhash_fg);
652 if (ret) {
a369d4ac 653 dealloc_flow_group(steering, fg);
19f100fe
MG
654 return ERR_PTR(ret);
655 }
656
bd71b08e 657 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
19f100fe
MG
658 tree_add_node(&fg->node, &ft->node);
659 /* Add node to group list */
660 list_add(&fg->node.list, prev);
bd71b08e 661 atomic_inc(&ft->node.version);
19f100fe 662
0c56b975
MG
663 return fg;
664}
665
efdc810b 666static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
aaff1bea 667 enum fs_flow_table_type table_type,
c9f1b073
HHZ
668 enum fs_flow_table_op_mod op_mod,
669 u32 flags)
0c56b975
MG
670{
671 struct mlx5_flow_table *ft;
693c6883 672 int ret;
0c56b975
MG
673
674 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
675 if (!ft)
693c6883
MB
676 return ERR_PTR(-ENOMEM);
677
678 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
679 if (ret) {
680 kfree(ft);
681 return ERR_PTR(ret);
682 }
0c56b975
MG
683
684 ft->level = level;
685 ft->node.type = FS_TYPE_FLOW_TABLE;
aaff1bea 686 ft->op_mod = op_mod;
0c56b975 687 ft->type = table_type;
efdc810b 688 ft->vport = vport;
0c56b975 689 ft->max_fte = max_fte;
c9f1b073 690 ft->flags = flags;
b3638e1a
MG
691 INIT_LIST_HEAD(&ft->fwd_rules);
692 mutex_init(&ft->lock);
0c56b975
MG
693
694 return ft;
695}
696
fdb6896f
MG
697/* If reverse is false, then we search for the first flow table in the
698 * root sub-tree from start(closest from right), else we search for the
699 * last flow table in the root sub-tree till start(closest from left).
700 */
701static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
702 struct list_head *start,
703 bool reverse)
704{
705#define list_advance_entry(pos, reverse) \
706 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
707
708#define list_for_each_advance_continue(pos, head, reverse) \
709 for (pos = list_advance_entry(pos, reverse); \
710 &pos->list != (head); \
711 pos = list_advance_entry(pos, reverse))
712
713 struct fs_node *iter = list_entry(start, struct fs_node, list);
714 struct mlx5_flow_table *ft = NULL;
715
716 if (!root)
717 return NULL;
718
719 list_for_each_advance_continue(iter, &root->children, reverse) {
720 if (iter->type == FS_TYPE_FLOW_TABLE) {
721 fs_get_obj(ft, iter);
722 return ft;
723 }
724 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
725 if (ft)
726 return ft;
727 }
728
729 return ft;
730}
731
732/* If reverse if false then return the first flow table in next priority of
733 * prio in the tree, else return the last flow table in the previous priority
734 * of prio in the tree.
735 */
736static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
737{
738 struct mlx5_flow_table *ft = NULL;
739 struct fs_node *curr_node;
740 struct fs_node *parent;
741
742 parent = prio->node.parent;
743 curr_node = &prio->node;
744 while (!ft && parent) {
745 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
746 curr_node = parent;
747 parent = curr_node->parent;
748 }
749 return ft;
750}
751
752/* Assuming all the tree is locked by mutex chain lock */
753static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
754{
755 return find_closest_ft(prio, false);
756}
757
758/* Assuming all the tree is locked by mutex chain lock */
759static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
760{
761 return find_closest_ft(prio, true);
762}
763
f90edfd2
MG
764static int connect_fts_in_prio(struct mlx5_core_dev *dev,
765 struct fs_prio *prio,
766 struct mlx5_flow_table *ft)
767{
af76c501 768 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
f90edfd2
MG
769 struct mlx5_flow_table *iter;
770 int i = 0;
771 int err;
772
773 fs_for_each_ft(iter, prio) {
774 i++;
af76c501 775 err = root->cmds->modify_flow_table(dev, iter, ft);
f90edfd2
MG
776 if (err) {
777 mlx5_core_warn(dev, "Failed to modify flow table %d\n",
778 iter->id);
779 /* The driver is out of sync with the FW */
780 if (i > 1)
781 WARN_ON(true);
782 return err;
783 }
784 }
785 return 0;
786}
787
788/* Connect flow tables from previous priority of prio to ft */
789static int connect_prev_fts(struct mlx5_core_dev *dev,
790 struct mlx5_flow_table *ft,
791 struct fs_prio *prio)
792{
793 struct mlx5_flow_table *prev_ft;
794
795 prev_ft = find_prev_chained_ft(prio);
796 if (prev_ft) {
797 struct fs_prio *prev_prio;
798
799 fs_get_obj(prev_prio, prev_ft->node.parent);
800 return connect_fts_in_prio(dev, prev_prio, ft);
801 }
802 return 0;
803}
804
2cc43b49
MG
805static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
806 *prio)
807{
808 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
dae37456 809 struct mlx5_ft_underlay_qp *uqp;
2cc43b49
MG
810 int min_level = INT_MAX;
811 int err;
dae37456 812 u32 qpn;
2cc43b49
MG
813
814 if (root->root_ft)
815 min_level = root->root_ft->level;
816
817 if (ft->level >= min_level)
818 return 0;
819
dae37456
AV
820 if (list_empty(&root->underlay_qpns)) {
821 /* Don't set any QPN (zero) in case QPN list is empty */
822 qpn = 0;
af76c501 823 err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
dae37456
AV
824 } else {
825 list_for_each_entry(uqp, &root->underlay_qpns, list) {
826 qpn = uqp->qpn;
af76c501
MB
827 err = root->cmds->update_root_ft(root->dev, ft,
828 qpn, false);
dae37456
AV
829 if (err)
830 break;
831 }
832 }
833
2cc43b49 834 if (err)
dae37456
AV
835 mlx5_core_warn(root->dev,
836 "Update root flow table of id(%u) qpn(%d) failed\n",
837 ft->id, qpn);
2cc43b49
MG
838 else
839 root->root_ft = ft;
840
841 return err;
842}
843
74491de9
MB
844static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
845 struct mlx5_flow_destination *dest)
b3638e1a 846{
af76c501 847 struct mlx5_flow_root_namespace *root;
b3638e1a
MG
848 struct mlx5_flow_table *ft;
849 struct mlx5_flow_group *fg;
850 struct fs_fte *fte;
bd5251db 851 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
b3638e1a
MG
852 int err = 0;
853
854 fs_get_obj(fte, rule->node.parent);
d2ec6a35 855 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
b3638e1a 856 return -EINVAL;
bd71b08e 857 down_write_ref_node(&fte->node);
b3638e1a
MG
858 fs_get_obj(fg, fte->node.parent);
859 fs_get_obj(ft, fg->node.parent);
860
861 memcpy(&rule->dest_attr, dest, sizeof(*dest));
af76c501
MB
862 root = find_root(&ft->node);
863 err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
864 modify_mask, fte);
bd71b08e 865 up_write_ref_node(&fte->node);
b3638e1a
MG
866
867 return err;
868}
869
74491de9
MB
870int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
871 struct mlx5_flow_destination *new_dest,
872 struct mlx5_flow_destination *old_dest)
873{
874 int i;
875
876 if (!old_dest) {
877 if (handle->num_rules != 1)
878 return -EINVAL;
879 return _mlx5_modify_rule_destination(handle->rule[0],
880 new_dest);
881 }
882
883 for (i = 0; i < handle->num_rules; i++) {
884 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
885 return _mlx5_modify_rule_destination(handle->rule[i],
886 new_dest);
887 }
888
889 return -EINVAL;
890}
891
b3638e1a
MG
892/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
893static int connect_fwd_rules(struct mlx5_core_dev *dev,
894 struct mlx5_flow_table *new_next_ft,
895 struct mlx5_flow_table *old_next_ft)
896{
4c5009c5 897 struct mlx5_flow_destination dest = {};
b3638e1a
MG
898 struct mlx5_flow_rule *iter;
899 int err = 0;
900
901 /* new_next_ft and old_next_ft could be NULL only
902 * when we create/destroy the anchor flow table.
903 */
904 if (!new_next_ft || !old_next_ft)
905 return 0;
906
907 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
908 dest.ft = new_next_ft;
909
910 mutex_lock(&old_next_ft->lock);
911 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
912 mutex_unlock(&old_next_ft->lock);
913 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
74491de9 914 err = _mlx5_modify_rule_destination(iter, &dest);
b3638e1a
MG
915 if (err)
916 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
917 new_next_ft->id);
918 }
919 return 0;
920}
921
f90edfd2
MG
922static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
923 struct fs_prio *prio)
924{
b3638e1a 925 struct mlx5_flow_table *next_ft;
f90edfd2
MG
926 int err = 0;
927
928 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
929
930 if (list_empty(&prio->node.children)) {
931 err = connect_prev_fts(dev, ft, prio);
932 if (err)
933 return err;
b3638e1a
MG
934
935 next_ft = find_next_chained_ft(prio);
936 err = connect_fwd_rules(dev, ft, next_ft);
937 if (err)
938 return err;
f90edfd2
MG
939 }
940
941 if (MLX5_CAP_FLOWTABLE(dev,
942 flow_table_properties_nic_receive.modify_root))
943 err = update_root_ft_create(ft, prio);
944 return err;
945}
946
d63cd286
MG
947static void list_add_flow_table(struct mlx5_flow_table *ft,
948 struct fs_prio *prio)
949{
950 struct list_head *prev = &prio->node.children;
951 struct mlx5_flow_table *iter;
952
953 fs_for_each_ft(iter, prio) {
954 if (iter->level > ft->level)
955 break;
956 prev = &iter->node.list;
957 }
958 list_add(&ft->node.list, prev);
959}
960
/* Allocate a flow table in @ns at priority @ft_attr->prio, create it in
 * hardware, and connect it into the table chain of its priority.
 * The whole operation runs under the root's chain_lock so that chaining
 * (next-table pointers, root table updates) stays consistent.
 * Returns the new table or an ERR_PTR on failure.
 */
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	/* Each priority owns a fixed window of levels; refuse to overflow it. */
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 * NOTE(review): this modifies the caller-provided attr in place.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	/* New table initially forwards misses to the next chained table. */
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
					    ft->type, ft->level, log_table_sz,
					    next_ft, &ft->id, ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	/* Publish the table in the priority's sorted child list. */
	down_write_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
1030
/* Create a flow table on the local function: a thin wrapper around
 * __mlx5_create_flow_table() with normal op_mod and vport 0.
 */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
1036
1037struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1038 int prio, int max_fte,
1039 u32 level, u16 vport)
1040{
b3ba5149
ES
1041 struct mlx5_flow_table_attr ft_attr = {};
1042
1043 ft_attr.max_fte = max_fte;
1044 ft_attr.level = level;
1045 ft_attr.prio = prio;
1046
57f35c93 1047 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
efdc810b
MHY
1048}
1049
b3ba5149
ES
1050struct mlx5_flow_table*
1051mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1052 int prio, u32 level)
aaff1bea 1053{
b3ba5149
ES
1054 struct mlx5_flow_table_attr ft_attr = {};
1055
1056 ft_attr.level = level;
1057 ft_attr.prio = prio;
1058 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
aaff1bea
AH
1059}
1060EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1061
b3ba5149
ES
1062struct mlx5_flow_table*
1063mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1064 int prio,
1065 int num_flow_table_entries,
1066 int max_num_groups,
1067 u32 level,
1068 u32 flags)
f0d22d18 1069{
b3ba5149 1070 struct mlx5_flow_table_attr ft_attr = {};
f0d22d18
MG
1071 struct mlx5_flow_table *ft;
1072
1073 if (max_num_groups > num_flow_table_entries)
1074 return ERR_PTR(-EINVAL);
1075
b3ba5149
ES
1076 ft_attr.max_fte = num_flow_table_entries;
1077 ft_attr.prio = prio;
1078 ft_attr.level = level;
1079 ft_attr.flags = flags;
1080
1081 ft = mlx5_create_flow_table(ns, &ft_attr);
f0d22d18
MG
1082 if (IS_ERR(ft))
1083 return ft;
1084
1085 ft->autogroup.active = true;
1086 ft->autogroup.required_groups = max_num_groups;
1087
1088 return ft;
1089}
b217ea25 1090EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
f0d22d18 1091
f0d22d18
MG
/* Create a user-defined flow group in @ft from firmware-format input
 * @fg_in.  Not allowed on autogrouped tables, where groups are managed
 * internally.  Returns the group or an ERR_PTR.
 */
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	/* Insert the software object under the table write lock; the
	 * hardware command runs outside the lock.
	 */
	down_write_ref_node(&ft->node);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		/* Drop the reference taken by alloc_insert_flow_group(). */
		tree_put_node(&fg->node);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
1130
1131static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1132{
1133 struct mlx5_flow_rule *rule;
1134
1135 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1136 if (!rule)
1137 return NULL;
1138
b3638e1a 1139 INIT_LIST_HEAD(&rule->next_ft);
0c56b975 1140 rule->node.type = FS_TYPE_FLOW_DEST;
60ab4584
AV
1141 if (dest)
1142 memcpy(&rule->dest_attr, dest, sizeof(*dest));
0c56b975
MG
1143
1144 return rule;
1145}
1146
74491de9
MB
1147static struct mlx5_flow_handle *alloc_handle(int num_rules)
1148{
1149 struct mlx5_flow_handle *handle;
1150
acafe7e3 1151 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
74491de9
MB
1152 if (!handle)
1153 return NULL;
1154
1155 handle->num_rules = num_rules;
1156
1157 return handle;
1158}
1159
1160static void destroy_flow_handle(struct fs_fte *fte,
1161 struct mlx5_flow_handle *handle,
1162 struct mlx5_flow_destination *dest,
1163 int i)
1164{
1165 for (; --i >= 0;) {
dd8e1945 1166 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
74491de9
MB
1167 fte->dests_size--;
1168 list_del(&handle->rule[i]->node.list);
1169 kfree(handle->rule[i]);
1170 }
1171 }
1172 kfree(handle);
1173}
1174
/* Build a handle with one rule per destination (a single rule when
 * @dest_num is 0).  Existing matching rules are reused with an extra
 * reference; new rules are allocated and linked under @fte.
 * *modify_mask accumulates which FTE fields must be updated in hardware;
 * *new_rule is set when at least one rule was newly created.
 */
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			/* Reuse an identical existing destination rule. */
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			/* Counter destinations flag the counter mask,
			 * everything else flags the destination list.
			 */
			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	/* Roll back the i rules created so far. */
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}
1233
/* fte should not be deleted while calling this function */
/* Attach destinations to @fte and push the FTE to hardware: create it if
 * it does not yet exist, otherwise update only the fields accumulated in
 * modify_mask.  When every requested rule already existed, no hardware
 * command is issued.  Returns the flow handle or an ERR_PTR.
 */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	/* Nothing new to tell hardware if all rules were reused. */
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(get_dev(&ft->node),
					     ft, fg, fte);
	else
		err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
					     modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	/* Bump the version so concurrent adders re-validate their view. */
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}
1279
19f100fe
MG
1280static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1281 struct mlx5_flow_spec *spec)
0c56b975 1282{
af363705 1283 struct list_head *prev = &ft->node.children;
f0d22d18 1284 struct mlx5_flow_group *fg;
19f100fe 1285 unsigned int candidate_index = 0;
f0d22d18 1286 unsigned int group_size = 0;
f0d22d18
MG
1287
1288 if (!ft->autogroup.active)
1289 return ERR_PTR(-ENOENT);
1290
f0d22d18
MG
1291 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1292 /* We save place for flow groups in addition to max types */
1293 group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
1294
1295 /* ft->max_fte == ft->autogroup.max_types */
1296 if (group_size == 0)
1297 group_size = 1;
1298
1299 /* sorted by start_index */
1300 fs_for_each_fg(fg, ft) {
1301 if (candidate_index + group_size > fg->start_index)
1302 candidate_index = fg->start_index + fg->max_ftes;
1303 else
1304 break;
1305 prev = &fg->node.list;
1306 }
1307
19f100fe
MG
1308 if (candidate_index + group_size > ft->max_fte)
1309 return ERR_PTR(-ENOSPC);
1310
1311 fg = alloc_insert_flow_group(ft,
1312 spec->match_criteria_enable,
1313 spec->match_criteria,
1314 candidate_index,
1315 candidate_index + group_size - 1,
1316 prev);
1317 if (IS_ERR(fg))
f0d22d18 1318 goto out;
19f100fe
MG
1319
1320 ft->autogroup.num_groups++;
1321
1322out:
1323 return fg;
1324}
1325
/* Issue the firmware command that creates an auto-allocated flow group
 * (previously reserved by alloc_auto_flow_group()) in hardware.
 * Returns 0 on success or a negative errno.
 */
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* Translate the software group description into the firmware
	 * create_flow_group_in layout.
	 */
	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	/* Mark the source-eswitch owner field valid only when the mask
	 * actually matches on source_eswitch_owner_vhca_id.
	 */
	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}
1369
814fb875
MB
1370static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1371 struct mlx5_flow_destination *d2)
1372{
1373 if (d1->type == d2->type) {
1374 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
b17f7fc1 1375 d1->vport.num == d2->vport.num) ||
814fb875
MB
1376 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1377 d1->ft == d2->ft) ||
1378 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
664000b6
YH
1379 d1->tir_num == d2->tir_num) ||
1380 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1381 d1->ft_num == d2->ft_num))
814fb875
MB
1382 return true;
1383 }
1384
1385 return false;
1386}
1387
b3638e1a
MG
1388static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1389 struct mlx5_flow_destination *dest)
1390{
1391 struct mlx5_flow_rule *rule;
1392
1393 list_for_each_entry(rule, &fte->node.children, node.list) {
814fb875
MB
1394 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1395 return rule;
b3638e1a
MG
1396 }
1397 return NULL;
1398}
1399
0d235c3f
MB
1400static bool check_conflicting_actions(u32 action1, u32 action2)
1401{
1402 u32 xored_actions = action1 ^ action2;
1403
1404 /* if one rule only wants to count, it's ok */
1405 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1406 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1407 return false;
1408
1409 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1410 MLX5_FLOW_CONTEXT_ACTION_ENCAP |
96de67a7 1411 MLX5_FLOW_CONTEXT_ACTION_DECAP |
0c06897a
OG
1412 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1413 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
8da6fe2a
JL
1414 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1415 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1416 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
0d235c3f
MB
1417 return true;
1418
1419 return false;
1420}
1421
/* Verify that @flow_act can be merged into the existing @fte: actions
 * must not conflict and, when a flow tag is requested, it must equal the
 * tag the FTE already carries.  Returns 0 or -EEXIST.
 */
static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	/* An FTE has a single flow tag; a differing tag cannot be merged. */
	if (flow_act->has_flow_tag &&
	    fte->action.flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->action.flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}
1441
74491de9
MB
/* Merge a new rule into existing FTE @fte of group @fg: OR in the new
 * actions, push the change to hardware, and attach any newly created
 * rule nodes to the FTE.  On failure the FTE's action set is restored.
 */
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	/* Request an action update only when the merge changed something. */
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		/* Roll back the speculative action merge. */
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		/* refcount == 1 marks a rule created by this call. */
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}
1476
74491de9 1477struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
bd5251db
AV
1478{
1479 struct mlx5_flow_rule *dst;
1480 struct fs_fte *fte;
1481
74491de9 1482 fs_get_obj(fte, handle->rule[0]->node.parent);
bd5251db
AV
1483
1484 fs_for_each_dst(dst, fte) {
1485 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
1486 return dst->dest_attr.counter;
1487 }
1488
1489 return NULL;
1490}
1491
1492static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
1493{
1494 if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
1495 return !counter;
1496
1497 if (!counter)
1498 return false;
1499
ae058314 1500 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
eafa6abd 1501 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
bd5251db
AV
1502}
1503
d63cd286
MG
1504static bool dest_is_valid(struct mlx5_flow_destination *dest,
1505 u32 action,
1506 struct mlx5_flow_table *ft)
1507{
bd5251db
AV
1508 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1509 return counter_is_valid(dest->counter, action);
1510
d63cd286
MG
1511 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1512 return true;
1513
1514 if (!dest || ((dest->type ==
1515 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1516 (dest->ft->level <= ft->level)))
1517 return false;
1518 return true;
1519}
1520
46719d77
MG
/* Node in a temporary list of flow groups whose match criteria equal
 * those of a flow spec being inserted.
 */
struct match_list {
	struct list_head list;
	struct mlx5_flow_group *g;	/* referenced group (tree_get_node held) */
};

/* Match-list head with its first element embedded, so a non-empty list
 * needs one fewer allocation.
 */
struct match_list_head {
	struct list_head list;
	struct match_list first;
};
1530
1531static void free_match_list(struct match_list_head *head)
1532{
1533 if (!list_empty(&head->list)) {
1534 struct match_list *iter, *match_tmp;
1535
1536 list_del(&head->first.list);
bd71b08e 1537 tree_put_node(&head->first.g->node);
46719d77
MG
1538 list_for_each_entry_safe(iter, match_tmp, &head->list,
1539 list) {
bd71b08e 1540 tree_put_node(&iter->g->node);
46719d77
MG
1541 list_del(&iter->list);
1542 kfree(iter);
1543 }
1544 }
1545}
1546
/* Collect into @match_head every flow group of @ft whose match criteria
 * equal @spec's, taking a tree reference on each.  Runs under RCU, so no
 * firmware commands or sleeping allocations are allowed here.
 * Returns 0 or -ENOMEM (with any partial list already released).
 */
static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		/* First hit goes into the embedded element - no alloc. */
		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;	/* group being deleted */
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		/* GFP_ATOMIC: inside an RCU read-side critical section. */
		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}
1589
bd71b08e
MG
1590static u64 matched_fgs_get_version(struct list_head *match_head)
1591{
1592 struct match_list *iter;
1593 u64 version = 0;
1594
1595 list_for_each_entry(iter, match_head, list)
1596 version += (u64)atomic_read(&iter->g->node.version);
1597 return version;
1598}
1599
46719d77
MG
1600static struct mlx5_flow_handle *
1601try_add_to_existing_fg(struct mlx5_flow_table *ft,
bd71b08e 1602 struct list_head *match_head,
46719d77
MG
1603 struct mlx5_flow_spec *spec,
1604 struct mlx5_flow_act *flow_act,
1605 struct mlx5_flow_destination *dest,
bd71b08e
MG
1606 int dest_num,
1607 int ft_version)
46719d77 1608{
a369d4ac 1609 struct mlx5_flow_steering *steering = get_steering(&ft->node);
46719d77
MG
1610 struct mlx5_flow_group *g;
1611 struct mlx5_flow_handle *rule;
46719d77 1612 struct match_list *iter;
bd71b08e
MG
1613 bool take_write = false;
1614 struct fs_fte *fte;
1615 u64 version;
f5c2ff17
MG
1616 int err;
1617
a369d4ac 1618 fte = alloc_fte(ft, spec->match_value, flow_act);
f5c2ff17
MG
1619 if (IS_ERR(fte))
1620 return ERR_PTR(-ENOMEM);
46719d77 1621
bd71b08e
MG
1622 list_for_each_entry(iter, match_head, list) {
1623 nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
bd71b08e 1624 }
693c6883 1625
bd71b08e
MG
1626search_again_locked:
1627 version = matched_fgs_get_version(match_head);
693c6883 1628 /* Try to find a fg that already contains a matching fte */
bd71b08e
MG
1629 list_for_each_entry(iter, match_head, list) {
1630 struct fs_fte *fte_tmp;
693c6883
MB
1631
1632 g = iter->g;
bd71b08e
MG
1633 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
1634 rhash_fte);
1635 if (!fte_tmp || !tree_get_node(&fte_tmp->node))
1636 continue;
1637
1638 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1639 if (!take_write) {
1640 list_for_each_entry(iter, match_head, list)
1641 up_read_ref_node(&iter->g->node);
1642 } else {
1643 list_for_each_entry(iter, match_head, list)
1644 up_write_ref_node(&iter->g->node);
693c6883 1645 }
bd71b08e
MG
1646
1647 rule = add_rule_fg(g, spec->match_value,
1648 flow_act, dest, dest_num, fte_tmp);
1649 up_write_ref_node(&fte_tmp->node);
1650 tree_put_node(&fte_tmp->node);
a369d4ac 1651 kmem_cache_free(steering->ftes_cache, fte);
bd71b08e 1652 return rule;
693c6883
MB
1653 }
1654
1655 /* No group with matching fte found. Try to add a new fte to any
1656 * matching fg.
1657 */
693c6883 1658
bd71b08e
MG
1659 if (!take_write) {
1660 list_for_each_entry(iter, match_head, list)
1661 up_read_ref_node(&iter->g->node);
1662 list_for_each_entry(iter, match_head, list)
1663 nested_down_write_ref_node(&iter->g->node,
1664 FS_LOCK_PARENT);
1665 take_write = true;
693c6883
MB
1666 }
1667
bd71b08e
MG
1668 /* Check the ft version, for case that new flow group
1669 * was added while the fgs weren't locked
1670 */
1671 if (atomic_read(&ft->node.version) != ft_version) {
1672 rule = ERR_PTR(-EAGAIN);
1673 goto out;
1674 }
b92af5a7 1675
bd71b08e
MG
1676 /* Check the fgs version, for case the new FTE with the
1677 * same values was added while the fgs weren't locked
1678 */
1679 if (version != matched_fgs_get_version(match_head))
1680 goto search_again_locked;
1681
1682 list_for_each_entry(iter, match_head, list) {
1683 g = iter->g;
1684
1685 if (!g->node.active)
1686 continue;
f5c2ff17
MG
1687 err = insert_fte(g, fte);
1688 if (err) {
1689 if (err == -ENOSPC)
bd71b08e
MG
1690 continue;
1691 list_for_each_entry(iter, match_head, list)
1692 up_write_ref_node(&iter->g->node);
a369d4ac 1693 kmem_cache_free(steering->ftes_cache, fte);
f5c2ff17 1694 return ERR_PTR(err);
bd71b08e 1695 }
693c6883 1696
bd71b08e
MG
1697 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1698 list_for_each_entry(iter, match_head, list)
1699 up_write_ref_node(&iter->g->node);
1700 rule = add_rule_fg(g, spec->match_value,
1701 flow_act, dest, dest_num, fte);
1702 up_write_ref_node(&fte->node);
1703 tree_put_node(&fte->node);
1704 return rule;
1705 }
1706 rule = ERR_PTR(-ENOENT);
1707out:
1708 list_for_each_entry(iter, match_head, list)
1709 up_write_ref_node(&iter->g->node);
a369d4ac 1710 kmem_cache_free(steering->ftes_cache, fte);
693c6883
MB
1711 return rule;
1712}
1713
74491de9
MB
/* Core rule-insertion path: validate the spec and destinations, try to
 * reuse an existing matching group/FTE (optimistically, under a read
 * lock on the table), and only if that fails take the table write lock,
 * allocate a new autogroup, create it in hardware, and insert a fresh
 * FTE into it.  Retries from the top when version checks detect a
 * concurrent modification.
 */
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)

{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	/* Snapshot the table version to detect concurrent group adds. */
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	/* Anything but "not found"/"retry" is a final answer. */
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return rule;
	}

	/* Upgrade to the table write lock before creating a new group. */
	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node);
		return rule;
	}

	/* Hold the new group's lock; the table lock is no longer needed. */
	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node);
	tree_put_node(&fte->node);
	tree_put_node(&g->node);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node);
	tree_put_node(&g->node);
	return ERR_PTR(err);
}
b3638e1a
MG
1813
1814static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1815{
1816 return ((ft->type == FS_FT_NIC_RX) &&
1817 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1818}
1819
74491de9
MB
/* Public rule-add entry point.  Translates the pseudo-action
 * FWD_NEXT_PRIO into a concrete forward to the next chained table
 * (under the root chain_lock, so the chain cannot move meanwhile),
 * then delegates to _mlx5_add_flow_rules().
 */
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		/* Caller must not also pass explicit destinations. */
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			/* No next table to forward to. */
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		/* Track the rule on the target table so it can be rewired
		 * if the chain changes.
		 */
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
0c56b975 1870
74491de9 1871void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
0c56b975 1872{
74491de9
MB
1873 int i;
1874
1875 for (i = handle->num_rules - 1; i >= 0; i--)
1876 tree_remove_node(&handle->rule[i]->node);
1877 kfree(handle);
0c56b975 1878}
74491de9 1879EXPORT_SYMBOL(mlx5_del_flow_rules);
0c56b975 1880
2cc43b49
MG
1881/* Assuming prio->node.children(flow tables) is sorted by level */
1882static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
1883{
1884 struct fs_prio *prio;
1885
1886 fs_get_obj(prio, ft->node.parent);
1887
1888 if (!list_is_last(&ft->node.list, &prio->node.children))
1889 return list_next_entry(ft, node.list);
1890 return find_next_chained_ft(prio);
1891}
1892
/* If @ft is the namespace's root table, repoint the hardware root to the
 * next table in the chain (once per underlay QPN, or once with QPN 0
 * when the list is empty) before @ft is destroyed.
 * NOTE(review): deliberately returns 0 even when the firmware update
 * fails - the failure is only logged and destruction proceeds.
 */
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		/* Last table in the namespace - no root remains. */
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
1935
f90edfd2
MG
/* Connect flow table from previous priority to
 * the next flow table.
 */
/* Unlink @ft from the chain before destroying it: move the hardware root
 * if needed, rewire forward rules that pointed at @ft, and reconnect the
 * previous priority's tables to @ft's successor.  Only the first table
 * of a priority is chained from outside, so later tables need no rewiring.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	/* Only the first table of the priority is chained to from the
	 * previous priority; others need no reconnection.
	 */
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	/* Redirect rules that forwarded into @ft to its successor. */
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
1967
86d722ad 1968int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
0c56b975 1969{
2cc43b49
MG
1970 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1971 int err = 0;
1972
1973 mutex_lock(&root->chain_lock);
f90edfd2 1974 err = disconnect_flow_table(ft);
2cc43b49
MG
1975 if (err) {
1976 mutex_unlock(&root->chain_lock);
1977 return err;
1978 }
0c56b975
MG
1979 if (tree_remove_node(&ft->node))
1980 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
1981 ft->id);
2cc43b49 1982 mutex_unlock(&root->chain_lock);
0c56b975 1983
2cc43b49 1984 return err;
0c56b975 1985}
b217ea25 1986EXPORT_SYMBOL(mlx5_destroy_flow_table);
0c56b975 1987
86d722ad 1988void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
0c56b975
MG
1989{
1990 if (tree_remove_node(&fg->node))
1991 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
1992 fg->id);
1993}
25302363 1994
86d722ad
MG
1995struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1996 enum mlx5_flow_namespace_type type)
25302363 1997{
fba53f7b
MG
1998 struct mlx5_flow_steering *steering = dev->priv.steering;
1999 struct mlx5_flow_root_namespace *root_ns;
8ce78257 2000 int prio = 0;
78228cbd 2001 struct fs_prio *fs_prio;
25302363
MG
2002 struct mlx5_flow_namespace *ns;
2003
fba53f7b 2004 if (!steering)
25302363
MG
2005 return NULL;
2006
2007 switch (type) {
25302363 2008 case MLX5_FLOW_NAMESPACE_FDB:
fba53f7b
MG
2009 if (steering->fdb_root_ns)
2010 return &steering->fdb_root_ns->ns;
2226dcb4 2011 return NULL;
87d22483
MG
2012 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2013 if (steering->sniffer_rx_root_ns)
2014 return &steering->sniffer_rx_root_ns->ns;
2226dcb4 2015 return NULL;
87d22483
MG
2016 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2017 if (steering->sniffer_tx_root_ns)
2018 return &steering->sniffer_tx_root_ns->ns;
2226dcb4 2019 return NULL;
2226dcb4
MB
2020 default:
2021 break;
25302363
MG
2022 }
2023
8ce78257
MB
2024 if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
2025 root_ns = steering->egress_root_ns;
2026 } else { /* Must be NIC RX */
2027 root_ns = steering->root_ns;
2028 prio = type;
2029 }
2030
fba53f7b
MG
2031 if (!root_ns)
2032 return NULL;
2033
25302363
MG
2034 fs_prio = find_prio(&root_ns->ns, prio);
2035 if (!fs_prio)
2036 return NULL;
2037
2038 ns = list_first_entry(&fs_prio->node.children,
2039 typeof(*ns),
2040 node.list);
2041
2042 return ns;
2043}
b217ea25 2044EXPORT_SYMBOL(mlx5_get_flow_namespace);
25302363 2045
9b93ab98
GP
2046struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2047 enum mlx5_flow_namespace_type type,
2048 int vport)
2049{
2050 struct mlx5_flow_steering *steering = dev->priv.steering;
2051
2052 if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
2053 return NULL;
2054
2055 switch (type) {
2056 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2057 if (steering->esw_egress_root_ns &&
2058 steering->esw_egress_root_ns[vport])
2059 return &steering->esw_egress_root_ns[vport]->ns;
2060 else
2061 return NULL;
2062 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2063 if (steering->esw_ingress_root_ns &&
2064 steering->esw_ingress_root_ns[vport])
2065 return &steering->esw_ingress_root_ns[vport]->ns;
2066 else
2067 return NULL;
2068 default:
2069 return NULL;
2070 }
2071}
2072
25302363 2073static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
a257b94a 2074 unsigned int prio, int num_levels)
25302363
MG
2075{
2076 struct fs_prio *fs_prio;
2077
2078 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2079 if (!fs_prio)
2080 return ERR_PTR(-ENOMEM);
2081
2082 fs_prio->node.type = FS_TYPE_PRIO;
139ed6c6 2083 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
25302363 2084 tree_add_node(&fs_prio->node, &ns->node);
a257b94a 2085 fs_prio->num_levels = num_levels;
25302363 2086 fs_prio->prio = prio;
25302363
MG
2087 list_add_tail(&fs_prio->node.list, &ns->node.children);
2088
2089 return fs_prio;
2090}
2091
2092static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2093 *ns)
2094{
2095 ns->node.type = FS_TYPE_NAMESPACE;
2096
2097 return ns;
2098}
2099
2100static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
2101{
2102 struct mlx5_flow_namespace *ns;
2103
2104 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2105 if (!ns)
2106 return ERR_PTR(-ENOMEM);
2107
2108 fs_init_namespace(ns);
139ed6c6 2109 tree_init_node(&ns->node, NULL, del_sw_ns);
25302363
MG
2110 tree_add_node(&ns->node, &prio->node);
2111 list_add_tail(&ns->node.list, &prio->node.children);
2112
2113 return ns;
2114}
2115
13de6c10
MG
2116static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2117 struct init_tree_node *prio_metadata)
4cbdd30e
MG
2118{
2119 struct fs_prio *fs_prio;
2120 int i;
2121
2122 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
13de6c10 2123 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
4cbdd30e
MG
2124 if (IS_ERR(fs_prio))
2125 return PTR_ERR(fs_prio);
2126 }
2127 return 0;
2128}
2129
8d40d162
MG
2130#define FLOW_TABLE_BIT_SZ 1
2131#define GET_FLOW_TABLE_CAP(dev, offset) \
701052c5 2132 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
8d40d162
MG
2133 offset / 32)) >> \
2134 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2135static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2136{
2137 int i;
2138
2139 for (i = 0; i < caps->arr_sz; i++) {
2140 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2141 return false;
2142 }
2143 return true;
2144}
2145
fba53f7b 2146static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
8d40d162 2147 struct init_tree_node *init_node,
25302363
MG
2148 struct fs_node *fs_parent_node,
2149 struct init_tree_node *init_parent_node,
13de6c10 2150 int prio)
25302363 2151{
fba53f7b 2152 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
8d40d162
MG
2153 flow_table_properties_nic_receive.
2154 max_ft_level);
25302363
MG
2155 struct mlx5_flow_namespace *fs_ns;
2156 struct fs_prio *fs_prio;
2157 struct fs_node *base;
2158 int i;
2159 int err;
2160
2161 if (init_node->type == FS_TYPE_PRIO) {
8d40d162 2162 if ((init_node->min_ft_level > max_ft_level) ||
fba53f7b 2163 !has_required_caps(steering->dev, &init_node->caps))
8d40d162 2164 return 0;
25302363
MG
2165
2166 fs_get_obj(fs_ns, fs_parent_node);
4cbdd30e 2167 if (init_node->num_leaf_prios)
13de6c10
MG
2168 return create_leaf_prios(fs_ns, prio, init_node);
2169 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
25302363
MG
2170 if (IS_ERR(fs_prio))
2171 return PTR_ERR(fs_prio);
2172 base = &fs_prio->node;
2173 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2174 fs_get_obj(fs_prio, fs_parent_node);
2175 fs_ns = fs_create_namespace(fs_prio);
2176 if (IS_ERR(fs_ns))
2177 return PTR_ERR(fs_ns);
2178 base = &fs_ns->node;
2179 } else {
2180 return -EINVAL;
2181 }
13de6c10 2182 prio = 0;
25302363 2183 for (i = 0; i < init_node->ar_size; i++) {
fba53f7b 2184 err = init_root_tree_recursive(steering, &init_node->children[i],
13de6c10 2185 base, init_node, prio);
25302363
MG
2186 if (err)
2187 return err;
13de6c10
MG
2188 if (init_node->children[i].type == FS_TYPE_PRIO &&
2189 init_node->children[i].num_leaf_prios) {
2190 prio += init_node->children[i].num_leaf_prios;
2191 }
25302363
MG
2192 }
2193
2194 return 0;
2195}
2196
fba53f7b 2197static int init_root_tree(struct mlx5_flow_steering *steering,
8d40d162 2198 struct init_tree_node *init_node,
25302363
MG
2199 struct fs_node *fs_parent_node)
2200{
2201 int i;
2202 struct mlx5_flow_namespace *fs_ns;
2203 int err;
2204
2205 fs_get_obj(fs_ns, fs_parent_node);
2206 for (i = 0; i < init_node->ar_size; i++) {
fba53f7b 2207 err = init_root_tree_recursive(steering, &init_node->children[i],
25302363
MG
2208 &fs_ns->node,
2209 init_node, i);
2210 if (err)
2211 return err;
2212 }
2213 return 0;
2214}
2215
af76c501
MB
2216static struct mlx5_flow_root_namespace
2217*create_root_ns(struct mlx5_flow_steering *steering,
2218 enum fs_flow_table_type table_type)
25302363 2219{
af76c501 2220 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
25302363
MG
2221 struct mlx5_flow_root_namespace *root_ns;
2222 struct mlx5_flow_namespace *ns;
2223
05564d0a
AY
2224 if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2225 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2226 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2227
86d722ad 2228 /* Create the root namespace */
1b9a07ee 2229 root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
25302363
MG
2230 if (!root_ns)
2231 return NULL;
2232
fba53f7b 2233 root_ns->dev = steering->dev;
25302363 2234 root_ns->table_type = table_type;
af76c501 2235 root_ns->cmds = cmds;
25302363 2236
dae37456
AV
2237 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2238
25302363
MG
2239 ns = &root_ns->ns;
2240 fs_init_namespace(ns);
2cc43b49 2241 mutex_init(&root_ns->chain_lock);
bd71b08e 2242 tree_init_node(&ns->node, NULL, NULL);
25302363
MG
2243 tree_add_node(&ns->node, NULL);
2244
2245 return root_ns;
2246}
2247
655227ed
MG
2248static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2249
2250static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2251{
2252 struct fs_prio *prio;
2253
2254 fs_for_each_prio(prio, ns) {
a257b94a 2255 /* This updates prio start_level and num_levels */
655227ed 2256 set_prio_attrs_in_prio(prio, acc_level);
a257b94a 2257 acc_level += prio->num_levels;
655227ed
MG
2258 }
2259 return acc_level;
2260}
2261
2262static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2263{
2264 struct mlx5_flow_namespace *ns;
2265 int acc_level_ns = acc_level;
2266
2267 prio->start_level = acc_level;
2268 fs_for_each_ns(ns, prio)
a257b94a 2269 /* This updates start_level and num_levels of ns's priority descendants */
655227ed 2270 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
a257b94a
MG
2271 if (!prio->num_levels)
2272 prio->num_levels = acc_level_ns - prio->start_level;
2273 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
655227ed
MG
2274}
2275
2276static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2277{
2278 struct mlx5_flow_namespace *ns = &root_ns->ns;
2279 struct fs_prio *prio;
2280 int start_level = 0;
2281
2282 fs_for_each_prio(prio, ns) {
2283 set_prio_attrs_in_prio(prio, start_level);
a257b94a 2284 start_level += prio->num_levels;
655227ed
MG
2285 }
2286}
2287
153fefbf
MG
2288#define ANCHOR_PRIO 0
2289#define ANCHOR_SIZE 1
d63cd286 2290#define ANCHOR_LEVEL 0
fba53f7b 2291static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
153fefbf
MG
2292{
2293 struct mlx5_flow_namespace *ns = NULL;
b3ba5149 2294 struct mlx5_flow_table_attr ft_attr = {};
153fefbf
MG
2295 struct mlx5_flow_table *ft;
2296
fba53f7b 2297 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
eff596da 2298 if (WARN_ON(!ns))
153fefbf 2299 return -EINVAL;
b3ba5149
ES
2300
2301 ft_attr.max_fte = ANCHOR_SIZE;
2302 ft_attr.level = ANCHOR_LEVEL;
2303 ft_attr.prio = ANCHOR_PRIO;
2304
2305 ft = mlx5_create_flow_table(ns, &ft_attr);
153fefbf 2306 if (IS_ERR(ft)) {
fba53f7b 2307 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
153fefbf
MG
2308 return PTR_ERR(ft);
2309 }
2310 return 0;
2311}
2312
fba53f7b 2313static int init_root_ns(struct mlx5_flow_steering *steering)
25302363 2314{
9c26f5f8
TB
2315 int err;
2316
fba53f7b 2317 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
42fb18fd 2318 if (!steering->root_ns)
9c26f5f8 2319 return -ENOMEM;
25302363 2320
9c26f5f8
TB
2321 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2322 if (err)
2323 goto out_err;
25302363 2324
fba53f7b 2325 set_prio_attrs(steering->root_ns);
9c26f5f8
TB
2326 err = create_anchor_flow_table(steering);
2327 if (err)
2328 goto out_err;
153fefbf 2329
25302363
MG
2330 return 0;
2331
9c26f5f8
TB
2332out_err:
2333 cleanup_root_ns(steering->root_ns);
2334 steering->root_ns = NULL;
2335 return err;
25302363
MG
2336}
2337
0da2d666 2338static void clean_tree(struct fs_node *node)
25302363 2339{
0da2d666
MG
2340 if (node) {
2341 struct fs_node *iter;
2342 struct fs_node *temp;
25302363 2343
800350a3 2344 tree_get_node(node);
0da2d666
MG
2345 list_for_each_entry_safe(iter, temp, &node->children, list)
2346 clean_tree(iter);
800350a3 2347 tree_put_node(node);
0da2d666 2348 tree_remove_node(node);
25302363 2349 }
153fefbf
MG
2350}
2351
0da2d666 2352static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
25302363 2353{
25302363
MG
2354 if (!root_ns)
2355 return;
2356
0da2d666 2357 clean_tree(&root_ns->ns.node);
25302363
MG
2358}
2359
9b93ab98
GP
2360static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
2361{
2362 struct mlx5_flow_steering *steering = dev->priv.steering;
2363 int i;
2364
2365 if (!steering->esw_egress_root_ns)
2366 return;
2367
2368 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2369 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2370
2371 kfree(steering->esw_egress_root_ns);
2372}
2373
2374static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2375{
2376 struct mlx5_flow_steering *steering = dev->priv.steering;
2377 int i;
2378
2379 if (!steering->esw_ingress_root_ns)
2380 return;
2381
2382 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
2383 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2384
2385 kfree(steering->esw_ingress_root_ns);
2386}
2387
25302363
MG
2388void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2389{
fba53f7b
MG
2390 struct mlx5_flow_steering *steering = dev->priv.steering;
2391
0da2d666 2392 cleanup_root_ns(steering->root_ns);
9b93ab98
GP
2393 cleanup_egress_acls_root_ns(dev);
2394 cleanup_ingress_acls_root_ns(dev);
0da2d666 2395 cleanup_root_ns(steering->fdb_root_ns);
87d22483
MG
2396 cleanup_root_ns(steering->sniffer_rx_root_ns);
2397 cleanup_root_ns(steering->sniffer_tx_root_ns);
5f418378 2398 cleanup_root_ns(steering->egress_root_ns);
43a335e0 2399 mlx5_cleanup_fc_stats(dev);
a369d4ac
MG
2400 kmem_cache_destroy(steering->ftes_cache);
2401 kmem_cache_destroy(steering->fgs_cache);
fba53f7b 2402 kfree(steering);
25302363
MG
2403}
2404
87d22483
MG
2405static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2406{
2407 struct fs_prio *prio;
2408
2409 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2410 if (!steering->sniffer_tx_root_ns)
2411 return -ENOMEM;
2412
2413 /* Create single prio */
2414 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2415 if (IS_ERR(prio)) {
2416 cleanup_root_ns(steering->sniffer_tx_root_ns);
2417 return PTR_ERR(prio);
2418 }
2419 return 0;
2420}
2421
2422static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2423{
2424 struct fs_prio *prio;
2425
2426 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2427 if (!steering->sniffer_rx_root_ns)
2428 return -ENOMEM;
2429
2430 /* Create single prio */
2431 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2432 if (IS_ERR(prio)) {
2433 cleanup_root_ns(steering->sniffer_rx_root_ns);
2434 return PTR_ERR(prio);
2435 }
2436 return 0;
2437}
2438
fba53f7b 2439static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
25302363
MG
2440{
2441 struct fs_prio *prio;
2442
fba53f7b
MG
2443 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2444 if (!steering->fdb_root_ns)
25302363
MG
2445 return -ENOMEM;
2446
a842dd04 2447 prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
1033665e
OG
2448 if (IS_ERR(prio))
2449 goto out_err;
2450
2451 prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
2452 if (IS_ERR(prio))
2453 goto out_err;
2454
2455 set_prio_attrs(steering->fdb_root_ns);
2456 return 0;
2457
2458out_err:
2459 cleanup_root_ns(steering->fdb_root_ns);
2460 steering->fdb_root_ns = NULL;
2461 return PTR_ERR(prio);
25302363
MG
2462}
2463
9b93ab98 2464static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
2465{
2466 struct fs_prio *prio;
2467
9b93ab98
GP
2468 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2469 if (!steering->esw_egress_root_ns[vport])
efdc810b
MHY
2470 return -ENOMEM;
2471
2472 /* create 1 prio*/
9b93ab98 2473 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
44fafdaa 2474 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
2475}
2476
9b93ab98 2477static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
2478{
2479 struct fs_prio *prio;
2480
9b93ab98
GP
2481 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2482 if (!steering->esw_ingress_root_ns[vport])
efdc810b
MHY
2483 return -ENOMEM;
2484
2485 /* create 1 prio*/
9b93ab98 2486 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
44fafdaa 2487 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
2488}
2489
9b93ab98
GP
2490static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
2491{
2492 struct mlx5_flow_steering *steering = dev->priv.steering;
2493 int err;
2494 int i;
2495
2496 steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2497 sizeof(*steering->esw_egress_root_ns),
2498 GFP_KERNEL);
2499 if (!steering->esw_egress_root_ns)
2500 return -ENOMEM;
2501
2502 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2503 err = init_egress_acl_root_ns(steering, i);
2504 if (err)
2505 goto cleanup_root_ns;
2506 }
2507
2508 return 0;
2509
2510cleanup_root_ns:
2511 for (i--; i >= 0; i--)
2512 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2513 kfree(steering->esw_egress_root_ns);
2514 return err;
2515}
2516
2517static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2518{
2519 struct mlx5_flow_steering *steering = dev->priv.steering;
2520 int err;
2521 int i;
2522
2523 steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
2524 sizeof(*steering->esw_ingress_root_ns),
2525 GFP_KERNEL);
2526 if (!steering->esw_ingress_root_ns)
2527 return -ENOMEM;
2528
2529 for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
2530 err = init_ingress_acl_root_ns(steering, i);
2531 if (err)
2532 goto cleanup_root_ns;
2533 }
2534
2535 return 0;
2536
2537cleanup_root_ns:
2538 for (i--; i >= 0; i--)
2539 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2540 kfree(steering->esw_ingress_root_ns);
2541 return err;
2542}
2543
5f418378
AY
2544static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2545{
8ce78257 2546 int err;
5f418378
AY
2547
2548 steering->egress_root_ns = create_root_ns(steering,
2549 FS_FT_NIC_TX);
2550 if (!steering->egress_root_ns)
2551 return -ENOMEM;
2552
8ce78257
MB
2553 err = init_root_tree(steering, &egress_root_fs,
2554 &steering->egress_root_ns->ns.node);
2555 if (err)
2556 goto cleanup;
2557 set_prio_attrs(steering->egress_root_ns);
2558 return 0;
2559cleanup:
2560 cleanup_root_ns(steering->egress_root_ns);
2561 steering->egress_root_ns = NULL;
2562 return err;
5f418378
AY
2563}
2564
25302363
MG
2565int mlx5_init_fs(struct mlx5_core_dev *dev)
2566{
fba53f7b 2567 struct mlx5_flow_steering *steering;
25302363
MG
2568 int err = 0;
2569
43a335e0
AV
2570 err = mlx5_init_fc_stats(dev);
2571 if (err)
2572 return err;
2573
fba53f7b
MG
2574 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2575 if (!steering)
2576 return -ENOMEM;
2577 steering->dev = dev;
2578 dev->priv.steering = steering;
2579
a369d4ac
MG
2580 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
2581 sizeof(struct mlx5_flow_group), 0,
2582 0, NULL);
2583 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
2584 0, NULL);
2585 if (!steering->ftes_cache || !steering->fgs_cache) {
2586 err = -ENOMEM;
2587 goto err;
2588 }
2589
ffdb8827
ES
2590 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
2591 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
2592 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
2593 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
876d634d 2594 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
fba53f7b 2595 err = init_root_ns(steering);
25302363 2596 if (err)
43a335e0 2597 goto err;
25302363 2598 }
876d634d 2599
0efc8562 2600 if (MLX5_ESWITCH_MANAGER(dev)) {
bd02ef8e 2601 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
fba53f7b 2602 err = init_fdb_root_ns(steering);
bd02ef8e
MG
2603 if (err)
2604 goto err;
2605 }
2606 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
9b93ab98 2607 err = init_egress_acls_root_ns(dev);
bd02ef8e
MG
2608 if (err)
2609 goto err;
2610 }
2611 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
9b93ab98 2612 err = init_ingress_acls_root_ns(dev);
bd02ef8e
MG
2613 if (err)
2614 goto err;
2615 }
25302363
MG
2616 }
2617
87d22483
MG
2618 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
2619 err = init_sniffer_rx_root_ns(steering);
2620 if (err)
2621 goto err;
2622 }
2623
2624 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
2625 err = init_sniffer_tx_root_ns(steering);
2626 if (err)
2627 goto err;
2628 }
2629
8ce78257 2630 if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
5f418378
AY
2631 err = init_egress_root_ns(steering);
2632 if (err)
2633 goto err;
2634 }
2635
efdc810b
MHY
2636 return 0;
2637err:
2638 mlx5_cleanup_fs(dev);
25302363
MG
2639 return err;
2640}
50854114
YH
2641
2642int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2643{
2644 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
dae37456
AV
2645 struct mlx5_ft_underlay_qp *new_uqp;
2646 int err = 0;
2647
2648 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
2649 if (!new_uqp)
2650 return -ENOMEM;
2651
2652 mutex_lock(&root->chain_lock);
2653
2654 if (!root->root_ft) {
2655 err = -EINVAL;
2656 goto update_ft_fail;
2657 }
2658
af76c501
MB
2659 err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
2660 false);
dae37456
AV
2661 if (err) {
2662 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
2663 underlay_qpn, err);
2664 goto update_ft_fail;
2665 }
2666
2667 new_uqp->qpn = underlay_qpn;
2668 list_add_tail(&new_uqp->list, &root->underlay_qpns);
2669
2670 mutex_unlock(&root->chain_lock);
50854114 2671
50854114 2672 return 0;
dae37456
AV
2673
2674update_ft_fail:
2675 mutex_unlock(&root->chain_lock);
2676 kfree(new_uqp);
2677 return err;
50854114
YH
2678}
2679EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
2680
2681int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2682{
2683 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
dae37456
AV
2684 struct mlx5_ft_underlay_qp *uqp;
2685 bool found = false;
2686 int err = 0;
2687
2688 mutex_lock(&root->chain_lock);
2689 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2690 if (uqp->qpn == underlay_qpn) {
2691 found = true;
2692 break;
2693 }
2694 }
2695
2696 if (!found) {
2697 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
2698 underlay_qpn);
2699 err = -EINVAL;
2700 goto out;
2701 }
2702
af76c501
MB
2703 err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
2704 true);
dae37456
AV
2705 if (err)
2706 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
2707 underlay_qpn, err);
2708
2709 list_del(&uqp->list);
2710 mutex_unlock(&root->chain_lock);
2711 kfree(uqp);
50854114 2712
50854114 2713 return 0;
dae37456
AV
2714
2715out:
2716 mutex_unlock(&root->chain_lock);
2717 return err;
50854114
YH
2718}
2719EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);