net/mlx5: Drain wq first during PCI device removal
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c (linux-2.6-block.git)
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "eswitch.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX                                                \
	FS_REQUIRED_CAPS(                                                       \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .identified_miss_table_mode),                    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

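/* The static initialization tree below describes the default steering
 * hierarchy (bypass, LAG, offloads, ethtool, kernel, leftovers and anchor
 * priorities) that is instantiated for the NIC receive namespace.
 */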
static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

#define RDMA_RX_BYPASS_PRIO 0
#define RDMA_RX_KERNEL_PRIO 1
static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 1,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

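/* Lock-dependency classes for the nested rwsem locking used below: a
 * grandparent node may be held while taking its parent and then a child,
 * in that order, without triggering lockdep warnings.
 */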
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires
 * locking the FTE for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

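/* Every object in the steering tree is embedded in an fs_node. Nodes are
 * reference counted; the del_hw callback destroys the object in firmware
 * when the last reference is dropped, and the del_sw callback then frees
 * the host-side copy.
 */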
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

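/* Drop a reference. On the last put, the node is unlinked from its parent
 * (under the parent's lock), torn down in HW and SW, and the reference it
 * held on the parent is dropped in turn, so teardown propagates upwards.
 */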
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only root namespace doesn't have parent and we just
			 * need to free its node.
			 */
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node, locked);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = 0;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, NULL, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

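/* If ft becomes the new lowest-level table of the namespace, repoint the
 * HW root at it, once per underlay QPN (or once with QPN 0 when the
 * underlay list is empty).
 */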
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

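/* Common flow table creation path: allocate the SW node, create the table
 * in FW at a level carved out of the priority's range, and (for managed
 * tables) splice it into the priority's chained list of tables.
 */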
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;

	if (max_num_groups > autogroups_max_fte)
		return ERR_PTR(-EINVAL);
	if (num_reserved_entries > ft_attr->max_fte)
		return ERR_PTR(-EINVAL);

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

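/* Build a handle with one rule per destination. An existing rule with an
 * identical destination is reused (its refcount bumped); otherwise a new
 * rule is allocated and the FTE modify mask is updated accordingly.
 */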
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

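/* Carve out a free index range for a new autogroup. Groups are kept sorted
 * by start_index, so the first gap large enough inside the autogroup region
 * is used; -ENOSPC is returned when no such gap exists.
 */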
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

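/* Merge a new rule into an existing FTE: verify the actions and flow tag
 * don't conflict, OR in the new action bits, and push the (possibly
 * updated) entry to firmware via add_rule_fte().
 */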
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    dest->ft->type != FS_FT_FDB)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which have a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head, ft_locked);
			err = -ENOMEM;
			goto out;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

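/* Look up an FTE by match value inside a group. On success the FTE is
 * returned referenced and write-locked; inactive entries are skipped so a
 * concurrently deleted FTE is never reused.
 */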
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

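/* Try to attach the new rule to one of the already-matching flow groups:
 * first look for an FTE with the same match value to append to (unless
 * FLOW_ACT_NO_APPEND is set), then try to insert a fresh FTE into any
 * matching group. Table/group version counters detect racing changes and
 * force a retry or an -EAGAIN to the caller.
 */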
46719d77
MG
1689static struct mlx5_flow_handle *
1690try_add_to_existing_fg(struct mlx5_flow_table *ft,
bd71b08e 1691 struct list_head *match_head,
5233794b 1692 const struct mlx5_flow_spec *spec,
46719d77
MG
1693 struct mlx5_flow_act *flow_act,
1694 struct mlx5_flow_destination *dest,
bd71b08e
MG
1695 int dest_num,
1696 int ft_version)
46719d77 1697{
a369d4ac 1698 struct mlx5_flow_steering *steering = get_steering(&ft->node);
46719d77
MG
1699 struct mlx5_flow_group *g;
1700 struct mlx5_flow_handle *rule;
46719d77 1701 struct match_list *iter;
bd71b08e
MG
1702 bool take_write = false;
1703 struct fs_fte *fte;
dc638d11 1704 u64 version = 0;
f5c2ff17
MG
1705 int err;
1706
bb0ee7dc 1707 fte = alloc_fte(ft, spec, flow_act);
f5c2ff17
MG
1708 if (IS_ERR(fte))
1709 return ERR_PTR(-ENOMEM);
46719d77 1710
bd71b08e 1711search_again_locked:
d5634fee
PB
1712 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1713 goto skip_search;
dc638d11
EC
1714 version = matched_fgs_get_version(match_head);
1715 /* Try to find an fte with identical match value and attempt update its
1716 * action.
1717 */
bd71b08e
MG
1718 list_for_each_entry(iter, match_head, list) {
1719 struct fs_fte *fte_tmp;
693c6883
MB
1720
1721 g = iter->g;
ad9421e3
RD
1722 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1723 if (!fte_tmp)
bd71b08e 1724 continue;
bb0ee7dc 1725 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
476d61b7
EB
1726 up_write_ref_node(&fte_tmp->node, false);
1727 tree_put_node(&fte_tmp->node, false);
a369d4ac 1728 kmem_cache_free(steering->ftes_cache, fte);
bd71b08e 1729 return rule;
693c6883
MB
1730 }
1731
d5634fee
PB
1732skip_search:
1733 /* No group with matching fte found, or we skipped the search.
1734 * Try to add a new fte to any matching fg.
1735 */
1736
bd71b08e
MG
1737 /* Check the ft version, for case that new flow group
1738 * was added while the fgs weren't locked
1739 */
1740 if (atomic_read(&ft->node.version) != ft_version) {
1741 rule = ERR_PTR(-EAGAIN);
1742 goto out;
1743 }
b92af5a7 1744
dc638d11
EC
1745 /* Check the fgs version. If version have changed it could be that an
1746 * FTE with the same match value was added while the fgs weren't
1747 * locked.
bd71b08e 1748 */
dc638d11
EC
1749 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1750 version != matched_fgs_get_version(match_head)) {
ad9421e3 1751 take_write = true;
bd71b08e 1752 goto search_again_locked;
ad9421e3 1753 }
bd71b08e
MG
1754
1755 list_for_each_entry(iter, match_head, list) {
1756 g = iter->g;
1757
1758 if (!g->node.active)
1759 continue;
ad9421e3
RD
1760
1761 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1762
f5c2ff17
MG
1763 err = insert_fte(g, fte);
1764 if (err) {
476d61b7 1765 up_write_ref_node(&g->node, false);
f5c2ff17 1766 if (err == -ENOSPC)
bd71b08e 1767 continue;
a369d4ac 1768 kmem_cache_free(steering->ftes_cache, fte);
f5c2ff17 1769 return ERR_PTR(err);
bd71b08e 1770 }
693c6883 1771
bd71b08e 1772 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
476d61b7 1773 up_write_ref_node(&g->node, false);
bb0ee7dc 1774 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
476d61b7
EB
1775 up_write_ref_node(&fte->node, false);
1776 tree_put_node(&fte->node, false);
bd71b08e
MG
1777 return rule;
1778 }
1779 rule = ERR_PTR(-ENOENT);
1780out:
a369d4ac 1781 kmem_cache_free(steering->ftes_cache, fte);
693c6883
MB
1782 return rule;
1783}
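
/* Both try_add_to_existing_fg() above and _mlx5_add_flow_rules() below
 * are optimistic: lookups start under read locks, with the flow table
 * and flow group version counters sampled first and re-checked before
 * inserting. A version mismatch or -EAGAIN sends the caller back to
 * search_again_locked with take_write set, repeating the search under
 * write locks so a concurrently added FTE cannot be missed.
 */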

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                     const struct mlx5_flow_spec *spec,
                     struct mlx5_flow_act *flow_act,
                     struct mlx5_flow_destination *dest,
                     int dest_num)
{
        struct mlx5_flow_steering *steering = get_steering(&ft->node);
        struct mlx5_flow_handle *rule;
        struct match_list match_head;
        struct mlx5_flow_group *g;
        bool take_write = false;
        struct fs_fte *fte;
        int version;
        int err;
        int i;

        if (!check_valid_spec(spec))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < dest_num; i++) {
                if (!dest_is_valid(&dest[i], flow_act, ft))
                        return ERR_PTR(-EINVAL);
        }
        nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
        version = atomic_read(&ft->node.version);

        /* Collect all fgs which have a matching match_criteria */
        err = build_match_list(&match_head, ft, spec, take_write);
        if (err) {
                if (take_write)
                        up_write_ref_node(&ft->node, false);
                else
                        up_read_ref_node(&ft->node);
                return ERR_PTR(err);
        }

        if (!take_write)
                up_read_ref_node(&ft->node);

        rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
                                      dest_num, version);
        free_match_list(&match_head, take_write);
        if (!IS_ERR(rule) ||
            (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
                if (take_write)
                        up_write_ref_node(&ft->node, false);
                return rule;
        }

        if (!take_write) {
                nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
                take_write = true;
        }

        if (PTR_ERR(rule) == -EAGAIN ||
            version != atomic_read(&ft->node.version))
                goto search_again_locked;

        g = alloc_auto_flow_group(ft, spec);
        if (IS_ERR(g)) {
                rule = ERR_CAST(g);
                up_write_ref_node(&ft->node, false);
                return rule;
        }

        fte = alloc_fte(ft, spec, flow_act);
        if (IS_ERR(fte)) {
                up_write_ref_node(&ft->node, false);
                err = PTR_ERR(fte);
                goto err_alloc_fte;
        }

        nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
        up_write_ref_node(&ft->node, false);

        err = create_auto_flow_group(ft, g);
        if (err)
                goto err_release_fg;

        err = insert_fte(g, fte);
        if (err)
                goto err_release_fg;

        nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
        up_write_ref_node(&g->node, false);
        rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
        up_write_ref_node(&fte->node, false);
        tree_put_node(&fte->node, false);
        tree_put_node(&g->node, false);
        return rule;

err_release_fg:
        up_write_ref_node(&g->node, false);
        kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
        tree_put_node(&g->node, false);
        return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
        return ((ft->type == FS_FT_NIC_RX) &&
                (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
                    const struct mlx5_flow_spec *spec,
                    struct mlx5_flow_act *flow_act,
                    struct mlx5_flow_destination *dest,
                    int num_dest)
{
        struct mlx5_flow_root_namespace *root = find_root(&ft->node);
        static const struct mlx5_flow_spec zero_spec = {};
        struct mlx5_flow_destination gen_dest = {};
        struct mlx5_flow_table *next_ft = NULL;
        struct mlx5_flow_handle *handle = NULL;
        u32 sw_action = flow_act->action;
        struct fs_prio *prio;

        if (!spec)
                spec = &zero_spec;

        fs_get_obj(prio, ft->node.parent);
        if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                if (!fwd_next_prio_supported(ft))
                        return ERR_PTR(-EOPNOTSUPP);
                if (num_dest)
                        return ERR_PTR(-EINVAL);
                mutex_lock(&root->chain_lock);
                next_ft = find_next_chained_ft(prio);
                if (next_ft) {
                        gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        gen_dest.ft = next_ft;
                        dest = &gen_dest;
                        num_dest = 1;
                        flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                } else {
                        mutex_unlock(&root->chain_lock);
                        return ERR_PTR(-EOPNOTSUPP);
                }
        }

        handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

        if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                if (!IS_ERR_OR_NULL(handle) &&
                    (list_empty(&handle->rule[0]->next_ft))) {
                        mutex_lock(&next_ft->lock);
                        list_add(&handle->rule[0]->next_ft,
                                 &next_ft->fwd_rules);
                        mutex_unlock(&next_ft->lock);
                        handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
                }
                mutex_unlock(&root->chain_lock);
        }
        return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
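
/* Illustrative sketch (not part of the original file): a typical caller
 * steers UDP traffic to a TIR. The table pointer and "tirn" are
 * assumptions of the example; the returned rule is released later with
 * mlx5_del_flow_rules(). Kept compiled out under #if 0.
 */
#if 0
static struct mlx5_flow_handle *
example_add_udp_rule(struct mlx5_flow_table *ft, u32 tirn)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return ERR_PTR(-ENOMEM);

        /* Match on the outer IP protocol field only */
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                         outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value,
                 outer_headers.ip_protocol, IPPROTO_UDP);

        /* Forward matching packets to the given TIR */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
        kvfree(spec);
        return rule;
}
#endif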

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
        struct fs_fte *fte;
        int i;

        /* In order to consolidate the HW changes we lock the FTE against
         * other changes and take an extra refcount so that the FTE's "del"
         * callbacks are not triggered implicitly; they are handled here.
         * The rules are removed under the locked FTE.
         * After removing all of the handle's rules: if rules remain, we
         * only need to modify the FTE in FW, then unlock it and drop the
         * refcount taken above. Otherwise the FTE should be deleted: first
         * delete it in FW, then unlock it and let tree_put_node() perform
         * the last refcount decrease as well as the required handling of
         * its parent.
         */
        fs_get_obj(fte, handle->rule[0]->node.parent);
        down_write_ref_node(&fte->node, false);
        for (i = handle->num_rules - 1; i >= 0; i--)
                tree_remove_node(&handle->rule[i]->node, true);
        if (fte->modify_mask && fte->dests_size) {
                modify_fte(fte);
                up_write_ref_node(&fte->node, false);
        } else {
                del_hw_fte(&fte->node);
                up_write(&fte->node.lock);
                tree_put_node(&fte->node, false);
        }
        kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
        struct fs_prio *prio;

        fs_get_obj(prio, ft->node.parent);

        if (!list_is_last(&ft->node.list, &prio->node.children))
                return list_next_entry(ft, node.list);
        return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
        struct mlx5_flow_root_namespace *root = find_root(&ft->node);
        struct mlx5_ft_underlay_qp *uqp;
        struct mlx5_flow_table *new_root_ft = NULL;
        int err = 0;
        u32 qpn;

        if (root->root_ft != ft)
                return 0;

        new_root_ft = find_next_ft(ft);
        if (!new_root_ft) {
                root->root_ft = NULL;
                return 0;
        }

        if (list_empty(&root->underlay_qpns)) {
                /* Don't set any QPN (zero) in case QPN list is empty */
                qpn = 0;
                err = root->cmds->update_root_ft(root, new_root_ft,
                                                 qpn, false);
        } else {
                list_for_each_entry(uqp, &root->underlay_qpns, list) {
                        qpn = uqp->qpn;
                        err = root->cmds->update_root_ft(root,
                                                         new_root_ft, qpn,
                                                         false);
                        if (err)
                                break;
                }
        }

        if (err)
                mlx5_core_warn(root->dev,
                               "Update root flow table of id(%u) qpn(%d) failed\n",
                               ft->id, qpn);
        else
                root->root_ft = new_root_ft;

        return 0;
}

/* Disconnect the flow table from the chain: connect the flow tables of
 * the previous priority to the next flow table instead of @ft.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
        struct mlx5_core_dev *dev = get_dev(&ft->node);
        struct mlx5_flow_table *next_ft;
        struct fs_prio *prio;
        int err = 0;

        err = update_root_ft_destroy(ft);
        if (err)
                return err;

        fs_get_obj(prio, ft->node.parent);
        if (!(list_first_entry(&prio->node.children,
                               struct mlx5_flow_table,
                               node.list) == ft))
                return 0;

        next_ft = find_next_chained_ft(prio);
        err = connect_fwd_rules(dev, next_ft, ft);
        if (err)
                return err;

        err = connect_prev_fts(dev, next_ft, prio);
        if (err)
                mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
                               ft->id);
        return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
        struct mlx5_flow_root_namespace *root = find_root(&ft->node);
        int err = 0;

        mutex_lock(&root->chain_lock);
        if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
                err = disconnect_flow_table(ft);
        if (err) {
                mutex_unlock(&root->chain_lock);
                return err;
        }
        if (tree_remove_node(&ft->node, false))
                mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
                               ft->id);
        mutex_unlock(&root->chain_lock);

        return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
        if (tree_remove_node(&fg->node, false))
                mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
                               fg->id);
}

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
                                                int n)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;

        if (!steering || !steering->fdb_sub_ns)
                return NULL;

        return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                                                    enum mlx5_flow_namespace_type type)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;
        struct mlx5_flow_root_namespace *root_ns;
        int prio = 0;
        struct fs_prio *fs_prio;
        struct mlx5_flow_namespace *ns;

        if (!steering)
                return NULL;

        switch (type) {
        case MLX5_FLOW_NAMESPACE_FDB:
                if (steering->fdb_root_ns)
                        return &steering->fdb_root_ns->ns;
                return NULL;
        case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
                if (steering->sniffer_rx_root_ns)
                        return &steering->sniffer_rx_root_ns->ns;
                return NULL;
        case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
                if (steering->sniffer_tx_root_ns)
                        return &steering->sniffer_tx_root_ns->ns;
                return NULL;
        default:
                break;
        }

        if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
                root_ns = steering->egress_root_ns;
        } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
                root_ns = steering->rdma_rx_root_ns;
                prio = RDMA_RX_BYPASS_PRIO;
        } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
                root_ns = steering->rdma_rx_root_ns;
                prio = RDMA_RX_KERNEL_PRIO;
        } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
                root_ns = steering->rdma_tx_root_ns;
        } else { /* Must be NIC RX */
                root_ns = steering->root_ns;
                prio = type;
        }

        if (!root_ns)
                return NULL;

        fs_prio = find_prio(&root_ns->ns, prio);
        if (!fs_prio)
                return NULL;

        ns = list_first_entry(&fs_prio->node.children,
                              typeof(*ns),
                              node.list);

        return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
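
/* Illustrative sketch (not part of the original file): resolving the NIC RX
 * kernel namespace and creating a small flow table in it. The prio, size
 * and level values are assumptions of the example. Kept compiled out
 * under #if 0.
 */
#if 0
static struct mlx5_flow_table *example_create_table(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *ns;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
        if (!ns)
                return ERR_PTR(-EOPNOTSUPP);

        ft_attr.prio = 0;
        ft_attr.max_fte = 16;
        ft_attr.level = 0;

        return mlx5_create_flow_table(ns, &ft_attr);
}
#endif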

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
                                                              enum mlx5_flow_namespace_type type,
                                                              int vport)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;

        if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
                return NULL;

        switch (type) {
        case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
                if (steering->esw_egress_root_ns &&
                    steering->esw_egress_root_ns[vport])
                        return &steering->esw_egress_root_ns[vport]->ns;
                else
                        return NULL;
        case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
                if (steering->esw_ingress_root_ns &&
                    steering->esw_ingress_root_ns[vport])
                        return &steering->esw_ingress_root_ns[vport]->ns;
                else
                        return NULL;
        default:
                return NULL;
        }
}

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
                                       unsigned int prio,
                                       int num_levels,
                                       enum fs_node_type type)
{
        struct fs_prio *fs_prio;

        fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
        if (!fs_prio)
                return ERR_PTR(-ENOMEM);

        fs_prio->node.type = type;
        tree_init_node(&fs_prio->node, NULL, del_sw_prio);
        tree_add_node(&fs_prio->node, &ns->node);
        fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
        list_add_tail(&fs_prio->node.list, &ns->node.children);

        return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
                                              unsigned int prio,
                                              int num_levels)
{
        return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                                      unsigned int prio, int num_levels)
{
        return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
                                                     *ns)
{
        ns->node.type = FS_TYPE_NAMESPACE;

        return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
                                                       int def_miss_act)
{
        struct mlx5_flow_namespace *ns;

        ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (!ns)
                return ERR_PTR(-ENOMEM);

        fs_init_namespace(ns);
        ns->def_miss_action = def_miss_act;
        tree_init_node(&ns->node, NULL, del_sw_ns);
        tree_add_node(&ns->node, &prio->node);
        list_add_tail(&ns->node.list, &prio->node.children);

        return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
                             struct init_tree_node *prio_metadata)
{
        struct fs_prio *fs_prio;
        int i;

        for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
                fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
                if (IS_ERR(fs_prio))
                        return PTR_ERR(fs_prio);
        }
        return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
        ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
                        offset / 32)) >> \
          (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
        int i;

        for (i = 0; i < caps->arr_sz; i++) {
                if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
                        return false;
        }
        return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
                                    struct init_tree_node *init_node,
                                    struct fs_node *fs_parent_node,
                                    struct init_tree_node *init_parent_node,
                                    int prio)
{
        int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
                                              flow_table_properties_nic_receive.
                                              max_ft_level);
        struct mlx5_flow_namespace *fs_ns;
        struct fs_prio *fs_prio;
        struct fs_node *base;
        int i;
        int err;

        if (init_node->type == FS_TYPE_PRIO) {
                if ((init_node->min_ft_level > max_ft_level) ||
                    !has_required_caps(steering->dev, &init_node->caps))
                        return 0;

                fs_get_obj(fs_ns, fs_parent_node);
                if (init_node->num_leaf_prios)
                        return create_leaf_prios(fs_ns, prio, init_node);
                fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
                if (IS_ERR(fs_prio))
                        return PTR_ERR(fs_prio);
                base = &fs_prio->node;
        } else if (init_node->type == FS_TYPE_NAMESPACE) {
                fs_get_obj(fs_prio, fs_parent_node);
                fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
                if (IS_ERR(fs_ns))
                        return PTR_ERR(fs_ns);
                base = &fs_ns->node;
        } else {
                return -EINVAL;
        }
        prio = 0;
        for (i = 0; i < init_node->ar_size; i++) {
                err = init_root_tree_recursive(steering, &init_node->children[i],
                                               base, init_node, prio);
                if (err)
                        return err;
                if (init_node->children[i].type == FS_TYPE_PRIO &&
                    init_node->children[i].num_leaf_prios) {
                        prio += init_node->children[i].num_leaf_prios;
                }
        }

        return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
                          struct init_tree_node *init_node,
                          struct fs_node *fs_parent_node)
{
        int i;
        struct mlx5_flow_namespace *fs_ns;
        int err;

        fs_get_obj(fs_ns, fs_parent_node);
        for (i = 0; i < init_node->ar_size; i++) {
                err = init_root_tree_recursive(steering, &init_node->children[i],
                                               &fs_ns->node,
                                               init_node, i);
                if (err)
                        return err;
        }
        return 0;
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
                enum fs_flow_table_type table_type)
{
        const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
        struct mlx5_flow_root_namespace *root_ns;
        struct mlx5_flow_namespace *ns;

        if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
            (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
                cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

        /* Create the root namespace */
        root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
        if (!root_ns)
                return NULL;

        root_ns->dev = steering->dev;
        root_ns->table_type = table_type;
        root_ns->cmds = cmds;

        INIT_LIST_HEAD(&root_ns->underlay_qpns);

        ns = &root_ns->ns;
        fs_init_namespace(ns);
        mutex_init(&root_ns->chain_lock);
        tree_init_node(&ns->node, NULL, NULL);
        tree_add_node(&ns->node, NULL);

        return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
        struct fs_prio *prio;

        fs_for_each_prio(prio, ns) {
                /* This updates prio start_level and num_levels */
                set_prio_attrs_in_prio(prio, acc_level);
                acc_level += prio->num_levels;
        }
        return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
        struct mlx5_flow_namespace *ns;
        int acc_level_ns = acc_level;

        prio->start_level = acc_level;
        fs_for_each_ns(ns, prio) {
                /* This updates start_level and num_levels of ns's priority descendants */
                acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

                /* If this is a prio with chains, we can jump from one
                 * chain (namespace) to another, so we accumulate the
                 * levels.
                 */
                if (prio->node.type == FS_TYPE_PRIO_CHAINS)
                        acc_level = acc_level_ns;
        }

        if (!prio->num_levels)
                prio->num_levels = acc_level_ns - prio->start_level;
        WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
        struct mlx5_flow_namespace *ns = &root_ns->ns;
        struct fs_prio *prio;
        int start_level = 0;

        fs_for_each_prio(prio, ns) {
                set_prio_attrs_in_prio(prio, start_level);
                start_level += prio->num_levels;
        }
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
        struct mlx5_flow_namespace *ns = NULL;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;

        ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
        if (WARN_ON(!ns))
                return -EINVAL;

        ft_attr.max_fte = ANCHOR_SIZE;
        ft_attr.level = ANCHOR_LEVEL;
        ft_attr.prio = ANCHOR_PRIO;

        ft = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft)) {
                mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
                return PTR_ERR(ft);
        }
        return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
        int err;

        steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
        if (!steering->root_ns)
                return -ENOMEM;

        err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
        if (err)
                goto out_err;

        set_prio_attrs(steering->root_ns);
        err = create_anchor_flow_table(steering);
        if (err)
                goto out_err;

        return 0;

out_err:
        cleanup_root_ns(steering->root_ns);
        steering->root_ns = NULL;
        return err;
}

static void clean_tree(struct fs_node *node)
{
        if (node) {
                struct fs_node *iter;
                struct fs_node *temp;

                tree_get_node(node);
                list_for_each_entry_safe(iter, temp, &node->children, list)
                        clean_tree(iter);
                tree_put_node(node, false);
                tree_remove_node(node, false);
        }
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
        if (!root_ns)
                return;

        clean_tree(&root_ns->ns.node);
}

static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;
        int i;

        if (!steering->esw_egress_root_ns)
                return;

        for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
                cleanup_root_ns(steering->esw_egress_root_ns[i]);

        kfree(steering->esw_egress_root_ns);
        steering->esw_egress_root_ns = NULL;
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;
        int i;

        if (!steering->esw_ingress_root_ns)
                return;

        for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
                cleanup_root_ns(steering->esw_ingress_root_ns[i]);

        kfree(steering->esw_ingress_root_ns);
        steering->esw_ingress_root_ns = NULL;
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;

        cleanup_root_ns(steering->root_ns);
        cleanup_egress_acls_root_ns(dev);
        cleanup_ingress_acls_root_ns(dev);
        cleanup_root_ns(steering->fdb_root_ns);
        steering->fdb_root_ns = NULL;
        kfree(steering->fdb_sub_ns);
        steering->fdb_sub_ns = NULL;
        cleanup_root_ns(steering->sniffer_rx_root_ns);
        cleanup_root_ns(steering->sniffer_tx_root_ns);
        cleanup_root_ns(steering->rdma_rx_root_ns);
        cleanup_root_ns(steering->rdma_tx_root_ns);
        cleanup_root_ns(steering->egress_root_ns);
        mlx5_cleanup_fc_stats(dev);
        kmem_cache_destroy(steering->ftes_cache);
        kmem_cache_destroy(steering->fgs_cache);
        kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
        struct fs_prio *prio;

        steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
        if (!steering->sniffer_tx_root_ns)
                return -ENOMEM;

        /* Create single prio */
        prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
        struct fs_prio *prio;

        steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
        if (!steering->sniffer_rx_root_ns)
                return -ENOMEM;

        /* Create single prio */
        prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
        int err;

        steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
        if (!steering->rdma_rx_root_ns)
                return -ENOMEM;

        err = init_root_tree(steering, &rdma_rx_root_fs,
                             &steering->rdma_rx_root_ns->ns.node);
        if (err)
                goto out_err;

        set_prio_attrs(steering->rdma_rx_root_ns);

        return 0;

out_err:
        cleanup_root_ns(steering->rdma_rx_root_ns);
        steering->rdma_rx_root_ns = NULL;
        return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
        int err;

        steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
        if (!steering->rdma_tx_root_ns)
                return -ENOMEM;

        err = init_root_tree(steering, &rdma_tx_root_fs,
                             &steering->rdma_tx_root_ns->ns.node);
        if (err)
                goto out_err;

        set_prio_attrs(steering->rdma_tx_root_ns);

        return 0;

out_err:
        cleanup_root_ns(steering->rdma_tx_root_ns);
        steering->rdma_tx_root_ns = NULL;
        return err;
}

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc API for FT chains.
 * When creating a new ns for each chain, store it in the first available
 * slot. Assume tc chains are created and stored first, and only then the
 * FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
                                        struct mlx5_flow_namespace *ns)
{
        int chain = 0;

        while (steering->fdb_sub_ns[chain])
                ++chain;

        steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
                                        struct fs_prio *maj_prio)
{
        struct mlx5_flow_namespace *ns;
        struct fs_prio *min_prio;
        int prio;

        ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
        if (IS_ERR(ns))
                return PTR_ERR(ns);

        for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
                min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
                if (IS_ERR(min_prio))
                        return PTR_ERR(min_prio);
        }

        store_fdb_sub_ns_prio_chain(steering, ns);

        return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
                             int fs_prio,
                             int chains)
{
        struct fs_prio *maj_prio;
        int levels;
        int chain;
        int err;

        levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
        maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
                                          fs_prio,
                                          levels);
        if (IS_ERR(maj_prio))
                return PTR_ERR(maj_prio);

        for (chain = 0; chain < chains; chain++) {
                err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
                if (err)
                        return err;
        }

        return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
        int err;

        steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
                                       sizeof(*steering->fdb_sub_ns),
                                       GFP_KERNEL);
        if (!steering->fdb_sub_ns)
                return -ENOMEM;

        err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
        if (err)
                return err;

        err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
        if (err)
                return err;

        return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
        struct fs_prio *maj_prio;
        int err;

        steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
        if (!steering->fdb_root_ns)
                return -ENOMEM;

        maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
                                  1);
        if (IS_ERR(maj_prio)) {
                err = PTR_ERR(maj_prio);
                goto out_err;
        }
        err = create_fdb_fast_path(steering);
        if (err)
                goto out_err;

        maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
        if (IS_ERR(maj_prio)) {
                err = PTR_ERR(maj_prio);
                goto out_err;
        }

        /* We put this priority last, knowing that nothing will get here
         * unless explicitly forwarded to. This is possible because the
         * slow path tables have catch-all rules and nothing gets past
         * those tables.
         */
        maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
        if (IS_ERR(maj_prio)) {
                err = PTR_ERR(maj_prio);
                goto out_err;
        }

        set_prio_attrs(steering->fdb_root_ns);
        return 0;

out_err:
        cleanup_root_ns(steering->fdb_root_ns);
        kfree(steering->fdb_sub_ns);
        steering->fdb_sub_ns = NULL;
        steering->fdb_root_ns = NULL;
        return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
        struct fs_prio *prio;

        steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
        if (!steering->esw_egress_root_ns[vport])
                return -ENOMEM;

        /* create 1 prio */
        prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
        struct fs_prio *prio;

        steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
        if (!steering->esw_ingress_root_ns[vport])
                return -ENOMEM;

        /* create 1 prio */
        prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;
        int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;

        steering->esw_egress_root_ns =
                kcalloc(total_vports,
                        sizeof(*steering->esw_egress_root_ns),
                        GFP_KERNEL);
        if (!steering->esw_egress_root_ns)
                return -ENOMEM;

        for (i = 0; i < total_vports; i++) {
                err = init_egress_acl_root_ns(steering, i);
                if (err)
                        goto cleanup_root_ns;
        }

        return 0;

cleanup_root_ns:
        for (i--; i >= 0; i--)
                cleanup_root_ns(steering->esw_egress_root_ns[i]);
        kfree(steering->esw_egress_root_ns);
        steering->esw_egress_root_ns = NULL;
        return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering = dev->priv.steering;
        int total_vports = mlx5_eswitch_get_total_vports(dev);
        int err;
        int i;

        steering->esw_ingress_root_ns =
                kcalloc(total_vports,
                        sizeof(*steering->esw_ingress_root_ns),
                        GFP_KERNEL);
        if (!steering->esw_ingress_root_ns)
                return -ENOMEM;

        for (i = 0; i < total_vports; i++) {
                err = init_ingress_acl_root_ns(steering, i);
                if (err)
                        goto cleanup_root_ns;
        }

        return 0;

cleanup_root_ns:
        for (i--; i >= 0; i--)
                cleanup_root_ns(steering->esw_ingress_root_ns[i]);
        kfree(steering->esw_ingress_root_ns);
        steering->esw_ingress_root_ns = NULL;
        return err;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
        int err;

        steering->egress_root_ns = create_root_ns(steering,
                                                  FS_FT_NIC_TX);
        if (!steering->egress_root_ns)
                return -ENOMEM;

        err = init_root_tree(steering, &egress_root_fs,
                             &steering->egress_root_ns->ns.node);
        if (err)
                goto cleanup;
        set_prio_attrs(steering->egress_root_ns);
        return 0;
cleanup:
        cleanup_root_ns(steering->egress_root_ns);
        steering->egress_root_ns = NULL;
        return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_steering *steering;
        int err = 0;

        err = mlx5_init_fc_stats(dev);
        if (err)
                return err;

        steering = kzalloc(sizeof(*steering), GFP_KERNEL);
        if (!steering)
                return -ENOMEM;
        steering->dev = dev;
        dev->priv.steering = steering;

        steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
                                                sizeof(struct mlx5_flow_group), 0,
                                                0, NULL);
        steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
                                                 0, NULL);
        if (!steering->ftes_cache || !steering->fgs_cache) {
                err = -ENOMEM;
                goto err;
        }

        if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
              (MLX5_CAP_GEN(dev, nic_flow_table))) ||
             ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
              MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
            MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
                err = init_root_ns(steering);
                if (err)
                        goto err;
        }

        if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
                                goto err;
                }
                if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
                        err = init_egress_acls_root_ns(dev);
                        if (err)
                                goto err;
                }
                if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
                        err = init_ingress_acls_root_ns(dev);
                        if (err)
                                goto err;
                }
        }

        if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
                err = init_sniffer_rx_root_ns(steering);
                if (err)
                        goto err;
        }

        if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
                err = init_sniffer_tx_root_ns(steering);
                if (err)
                        goto err;
        }

        if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
            MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
                err = init_rdma_rx_root_ns(steering);
                if (err)
                        goto err;
        }

        if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
                err = init_rdma_tx_root_ns(steering);
                if (err)
                        goto err;
        }

        if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
            MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
                err = init_egress_root_ns(steering);
                if (err)
                        goto err;
        }

        return 0;
err:
        mlx5_cleanup_fs(dev);
        return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
        struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
        struct mlx5_ft_underlay_qp *new_uqp;
        int err = 0;

        new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
        if (!new_uqp)
                return -ENOMEM;

        mutex_lock(&root->chain_lock);

        if (!root->root_ft) {
                err = -EINVAL;
                goto update_ft_fail;
        }

        err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
                                         false);
        if (err) {
                mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
                               underlay_qpn, err);
                goto update_ft_fail;
        }

        new_uqp->qpn = underlay_qpn;
        list_add_tail(&new_uqp->list, &root->underlay_qpns);

        mutex_unlock(&root->chain_lock);

        return 0;

update_ft_fail:
        mutex_unlock(&root->chain_lock);
        kfree(new_uqp);
        return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
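
/* Illustrative sketch (not part of the original file): an IPoIB-style user
 * registers its underlay QP when the interface opens and removes it again
 * on close. "qpn" is an assumption of the example. Kept compiled out
 * under #if 0.
 */
#if 0
static int example_attach_underlay(struct mlx5_core_dev *dev, u32 qpn)
{
        int err;

        err = mlx5_fs_add_rx_underlay_qpn(dev, qpn);
        if (err)
                return err;

        /* ... RX traffic of the QP is now steered via the root FT ... */

        return mlx5_fs_remove_rx_underlay_qpn(dev, qpn);
}
#endif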

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
        struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
        struct mlx5_ft_underlay_qp *uqp;
        bool found = false;
        int err = 0;

        mutex_lock(&root->chain_lock);
        list_for_each_entry(uqp, &root->underlay_qpns, list) {
                if (uqp->qpn == underlay_qpn) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
                               underlay_qpn);
                err = -EINVAL;
                goto out;
        }

        err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
                                         true);
        if (err)
                mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
                               underlay_qpn, err);

        list_del(&uqp->list);
        mutex_unlock(&root->chain_lock);
        kfree(uqp);

        return 0;

out:
        mutex_unlock(&root->chain_lock);
        return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
        struct mlx5_flow_namespace *ns;

        if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
            ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
                ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
        else
                ns = mlx5_get_flow_namespace(dev, ns_type);
        if (!ns)
                return NULL;

        return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
                                                 u8 ns_type, u8 num_actions,
                                                 void *modify_actions)
{
        struct mlx5_flow_root_namespace *root;
        struct mlx5_modify_hdr *modify_hdr;
        int err;

        root = get_root_namespace(dev, ns_type);
        if (!root)
                return ERR_PTR(-EOPNOTSUPP);

        modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
        if (!modify_hdr)
                return ERR_PTR(-ENOMEM);

        modify_hdr->ns_type = ns_type;
        err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
                                              modify_actions, modify_hdr);
        if (err) {
                kfree(modify_hdr);
                return ERR_PTR(err);
        }

        return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);
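
/* Illustrative sketch (not part of the original file): allocating a modify
 * header action that rewrites the IPv4 TTL, then releasing it. The chosen
 * field, offset/length values and namespace are assumptions of the
 * example. Kept compiled out under #if 0.
 */
#if 0
static int example_modify_ttl(struct mlx5_core_dev *dev)
{
        u8 action[MLX5_ST_SZ_BYTES(set_action_in)] = {};
        struct mlx5_modify_hdr *modify_hdr;

        MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
        MLX5_SET(set_action_in, action, field,
                 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
        MLX5_SET(set_action_in, action, offset, 0);
        MLX5_SET(set_action_in, action, length, 8);
        MLX5_SET(set_action_in, action, data, 1);

        modify_hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
                                              1, action);
        if (IS_ERR(modify_hdr))
                return PTR_ERR(modify_hdr);

        /* ... attach via flow_act.modify_hdr when adding a rule ... */

        mlx5_modify_header_dealloc(dev, modify_hdr);
        return 0;
}
#endif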

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
                                struct mlx5_modify_hdr *modify_hdr)
{
        struct mlx5_flow_root_namespace *root;

        root = get_root_namespace(dev, modify_hdr->ns_type);
        if (WARN_ON(!root))
                return;
        root->cmds->modify_header_dealloc(root, modify_hdr);
        kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
                                                     int reformat_type,
                                                     size_t size,
                                                     void *reformat_data,
                                                     enum mlx5_flow_namespace_type ns_type)
{
        struct mlx5_pkt_reformat *pkt_reformat;
        struct mlx5_flow_root_namespace *root;
        int err;

        root = get_root_namespace(dev, ns_type);
        if (!root)
                return ERR_PTR(-EOPNOTSUPP);

        pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
        if (!pkt_reformat)
                return ERR_PTR(-ENOMEM);

        pkt_reformat->ns_type = ns_type;
        pkt_reformat->reformat_type = reformat_type;
        err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
                                                reformat_data, ns_type,
                                                pkt_reformat);
        if (err) {
                kfree(pkt_reformat);
                return ERR_PTR(err);
        }

        return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
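
/* Illustrative sketch (not part of the original file): allocating a VXLAN
 * encap reformat context from raw header bytes built by the caller. The
 * reformat type, buffer and FDB namespace are assumptions of the example.
 * Kept compiled out under #if 0.
 */
#if 0
static struct mlx5_pkt_reformat *
example_alloc_vxlan_encap(struct mlx5_core_dev *dev, void *encap_hdr,
                          size_t encap_sz)
{
        return mlx5_packet_reformat_alloc(dev,
                                          MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
                                          encap_sz, encap_hdr,
                                          MLX5_FLOW_NAMESPACE_FDB);
}
#endif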

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
                                  struct mlx5_pkt_reformat *pkt_reformat)
{
        struct mlx5_flow_root_namespace *root;

        root = get_root_namespace(dev, pkt_reformat->ns_type);
        if (WARN_ON(!root))
                return;
        root->cmds->packet_reformat_dealloc(root, pkt_reformat);
        kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
                                 struct mlx5_flow_root_namespace *peer_ns)
{
        if (peer_ns && ns->mode != peer_ns->mode) {
                mlx5_core_err(ns->dev,
                              "Can't peer namespaces of different steering modes\n");
                return -EINVAL;
        }

        return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only during the init stage of the
 * namespace. It is not safe to call it while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
                                 enum mlx5_flow_steering_mode mode)
{
        struct mlx5_flow_root_namespace *root;
        const struct mlx5_flow_cmds *cmds;
        int err;

        root = find_root(&ns->node);
        if (&root->ns != ns)
                /* Can't set cmds to a non-root namespace */
                return -EINVAL;

        if (root->table_type != FS_FT_FDB)
                return -EOPNOTSUPP;

        if (root->mode == mode)
                return 0;

        if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
                cmds = mlx5_fs_cmd_get_dr_cmds();
        else
                cmds = mlx5_fs_cmd_get_fw_cmds();
        if (!cmds)
                return -EOPNOTSUPP;

        err = cmds->create_ns(root);
        if (err) {
                mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
                              err);
                return err;
        }

        root->cmds->destroy_ns(root);
        root->cmds = cmds;
        root->mode = mode;

        return 0;
}
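
/* Illustrative sketch (not part of the original file): switching the FDB
 * root namespace to SW-managed steering (SMFS) before any steering objects
 * exist, as the comment above requires. Kept compiled out under #if 0.
 */
#if 0
static int example_enable_smfs(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_namespace *ns;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!ns)
                return -EOPNOTSUPP;

        return mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
}
#endif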