fs_core.c — mlx5 flow steering core
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / fs_core.c
1/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mutex.h>
34#include <linux/mlx5/driver.h>
bf3e4d38 35#include <linux/mlx5/vport.h>
0efc8562 36#include <linux/mlx5/eswitch.h>
db492c1e 37#include <net/devlink.h>
de8575e0
MG
38
39#include "mlx5_core.h"
40#include "fs_core.h"
0c56b975 41#include "fs_cmd.h"
4a98544d 42#include "fs_ft_pool.h"
4c03e69a 43#include "diag/fs_tracepoint.h"
db492c1e 44#include "devlink.h"
0c56b975 45
25302363
MG
46#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 sizeof(struct init_tree_node))
48
a257b94a 49#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
8d40d162 50 ...) {.type = FS_TYPE_PRIO,\
25302363 51 .min_ft_level = min_level_val,\
a257b94a 52 .num_levels = num_levels_val,\
4cbdd30e 53 .num_leaf_prios = num_prios_val,\
8d40d162 54 .caps = caps_val,\
25302363
MG
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
57}
58
a257b94a
MG
59#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
4cbdd30e 61 __VA_ARGS__)\
25302363 62
f66ad830
MZ
63#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
64 .def_miss_action = def_miss_act,\
25302363
MG
65 .children = (struct init_tree_node[]) {__VA_ARGS__},\
66 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
67}
68
8d40d162
MG
69#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
70 sizeof(long))
71
72#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
73
74#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 .caps = (long[]) {__VA_ARGS__} }
76
6dc6071c
MG
77#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
81
8ce78257
MB
82#define FS_CHAINING_CAPS_EGRESS \
83 FS_REQUIRED_CAPS( \
84 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
85 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
86 FS_CAP(flow_table_properties_nic_transmit \
87 .identified_miss_table_mode), \
88 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
89
24670b1a
MG
90#define FS_CHAINING_CAPS_RDMA_TX \
91 FS_REQUIRED_CAPS( \
92 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
94 FS_CAP(flow_table_properties_nic_transmit_rdma \
95 .identified_miss_table_mode), \
96 FS_CAP(flow_table_properties_nic_transmit_rdma \
97 .flow_table_modify))
98
a257b94a 99#define LEFTOVERS_NUM_LEVELS 1
4cbdd30e 100#define LEFTOVERS_NUM_PRIOS 1
4cbdd30e 101
b8dfed63
AL
102#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
103#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
104
a257b94a 105#define BY_PASS_PRIO_NUM_LEVELS 1
6dc6071c 106#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
a257b94a
MG
107 LEFTOVERS_NUM_PRIOS)
108
15d187e2
LN
109#define KERNEL_RX_MACSEC_NUM_PRIOS 1
110#define KERNEL_RX_MACSEC_NUM_LEVELS 2
111#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
112
6dc6071c 113#define ETHTOOL_PRIO_NUM_LEVELS 1
e5835f28 114#define ETHTOOL_NUM_PRIOS 11
6dc6071c 115#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
8c17295b
LR
116/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
117#define KERNEL_NIC_PRIO_NUM_LEVELS 8
13de6c10
MG
118#define KERNEL_NIC_NUM_PRIOS 1
119/* One more level for tc */
120#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
8d40d162 121
479f074c 122#define KERNEL_NIC_TC_NUM_PRIOS 1
66cb64e2 123#define KERNEL_NIC_TC_NUM_LEVELS 3
479f074c 124
a257b94a 125#define ANCHOR_NUM_LEVELS 1
153fefbf
MG
126#define ANCHOR_NUM_PRIOS 1
127#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
acbc2004 128
20f7b37f 129#define OFFLOADS_MAX_FT 2
11b717d6
PB
130#define OFFLOADS_NUM_PRIOS 2
131#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
acbc2004 132
3e75d4eb
AH
133#define LAG_PRIO_NUM_LEVELS 1
134#define LAG_NUM_PRIOS 1
15d187e2 135#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
3e75d4eb 136
ee92e4f1 137#define KERNEL_TX_IPSEC_NUM_PRIOS 1
8c17295b 138#define KERNEL_TX_IPSEC_NUM_LEVELS 2
ee534d7f
LN
139#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
140
141#define KERNEL_TX_MACSEC_NUM_PRIOS 1
142#define KERNEL_TX_MACSEC_NUM_LEVELS 2
143#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
ee92e4f1 144
8d40d162
MG
145struct node_caps {
146 size_t arr_sz;
147 long *caps;
148};
8963ca45 149
25302363
MG
150static struct init_tree_node {
151 enum fs_node_type type;
152 struct init_tree_node *children;
153 int ar_size;
8d40d162 154 struct node_caps caps;
25302363 155 int min_ft_level;
4cbdd30e 156 int num_leaf_prios;
25302363 157 int prio;
a257b94a 158 int num_levels;
f66ad830 159 enum mlx5_flow_table_miss_action def_miss_action;
25302363
MG
160} root_fs = {
161 .type = FS_TYPE_NAMESPACE,
15d187e2 162 .ar_size = 8,
f66ad830
MZ
163 .children = (struct init_tree_node[]){
164 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
165 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
166 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
167 BY_PASS_PRIO_NUM_LEVELS))),
15d187e2
LN
168 ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
169 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
170 ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
171 KERNEL_RX_MACSEC_NUM_LEVELS))),
f66ad830
MZ
172 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
173 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
174 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
175 LAG_PRIO_NUM_LEVELS))),
20f7b37f 176 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
f66ad830
MZ
177 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
178 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
179 OFFLOADS_MAX_FT))),
180 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
181 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
182 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
183 ETHTOOL_PRIO_NUM_LEVELS))),
184 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
185 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
186 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
187 KERNEL_NIC_TC_NUM_LEVELS),
188 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
189 KERNEL_NIC_PRIO_NUM_LEVELS))),
190 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
191 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
192 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
193 LEFTOVERS_NUM_LEVELS))),
194 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
195 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
196 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
197 ANCHOR_NUM_LEVELS))),
25302363
MG
198 }
199};
200
8ce78257
MB
201static struct init_tree_node egress_root_fs = {
202 .type = FS_TYPE_NAMESPACE,
ee534d7f 203 .ar_size = 3,
8ce78257
MB
204 .children = (struct init_tree_node[]) {
205 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
206 FS_CHAINING_CAPS_EGRESS,
f66ad830
MZ
207 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
208 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
8ce78257 209 BY_PASS_PRIO_NUM_LEVELS))),
ee534d7f 210 ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
ee92e4f1
HN
211 FS_CHAINING_CAPS_EGRESS,
212 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
213 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
214 KERNEL_TX_IPSEC_NUM_LEVELS))),
ee534d7f
LN
215 ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
216 FS_CHAINING_CAPS_EGRESS,
217 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
218 ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
219 KERNEL_TX_MACSEC_NUM_LEVELS))),
8ce78257
MB
220 }
221};
222
b8dfed63
AL
223enum {
224 RDMA_RX_COUNTERS_PRIO,
225 RDMA_RX_BYPASS_PRIO,
226 RDMA_RX_KERNEL_PRIO,
227};
228
229#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
230#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
231#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
232
e6806e9a
MZ
233static struct init_tree_node rdma_rx_root_fs = {
234 .type = FS_TYPE_NAMESPACE,
b8dfed63 235 .ar_size = 3,
e6806e9a 236 .children = (struct init_tree_node[]) {
b8dfed63
AL
237 [RDMA_RX_COUNTERS_PRIO] =
238 ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
239 FS_CHAINING_CAPS,
240 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
241 ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
242 RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
e6806e9a 243 [RDMA_RX_BYPASS_PRIO] =
b8dfed63 244 ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
e6806e9a
MZ
245 FS_CHAINING_CAPS,
246 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
247 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
248 BY_PASS_PRIO_NUM_LEVELS))),
249 [RDMA_RX_KERNEL_PRIO] =
b8dfed63 250 ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
e6806e9a
MZ
251 FS_CHAINING_CAPS,
252 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
253 ADD_MULTIPLE_PRIO(1, 1))),
254 }
255};
256
b8dfed63
AL
257enum {
258 RDMA_TX_COUNTERS_PRIO,
259 RDMA_TX_BYPASS_PRIO,
260};
261
262#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
263#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
264
24670b1a
MG
265static struct init_tree_node rdma_tx_root_fs = {
266 .type = FS_TYPE_NAMESPACE,
b8dfed63 267 .ar_size = 2,
24670b1a 268 .children = (struct init_tree_node[]) {
b8dfed63
AL
269 [RDMA_TX_COUNTERS_PRIO] =
270 ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
271 FS_CHAINING_CAPS,
272 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
273 ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
274 RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
275 [RDMA_TX_BYPASS_PRIO] =
276 ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
24670b1a
MG
277 FS_CHAINING_CAPS_RDMA_TX,
278 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
b8dfed63 279 ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
24670b1a
MG
280 BY_PASS_PRIO_NUM_LEVELS))),
281 }
282};
283
c7784b1c
MG
/* Lockdep subclasses enforcing the grandparent -> parent -> child
 * nesting order when taking multiple fs_node rwsems.
 */
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};
289
0d235c3f 290static const struct rhashtable_params rhash_fte = {
c593642c 291 .key_len = sizeof_field(struct fs_fte, val),
0d235c3f
MB
292 .key_offset = offsetof(struct fs_fte, val),
293 .head_offset = offsetof(struct fs_fte, hash),
294 .automatic_shrinking = true,
295 .min_size = 1,
296};
297
693c6883 298static const struct rhashtable_params rhash_fg = {
c593642c 299 .key_len = sizeof_field(struct mlx5_flow_group, mask),
693c6883
MB
300 .key_offset = offsetof(struct mlx5_flow_group, mask),
301 .head_offset = offsetof(struct mlx5_flow_group, hash),
302 .automatic_shrinking = true,
303 .min_size = 1,
304
305};
306
bd71b08e
MG
307static void del_hw_flow_table(struct fs_node *node);
308static void del_hw_flow_group(struct fs_node *node);
309static void del_hw_fte(struct fs_node *node);
310static void del_sw_flow_table(struct fs_node *node);
311static void del_sw_flow_group(struct fs_node *node);
312static void del_sw_fte(struct fs_node *node);
139ed6c6
MG
313static void del_sw_prio(struct fs_node *node);
314static void del_sw_ns(struct fs_node *node);
14c129e3 315/* Delete rule (destination) is special case that
bd71b08e
MG
316 * requires to lock the FTE for all the deletion process.
317 */
318static void del_sw_hw_rule(struct fs_node *node);
814fb875
MB
319static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
320 struct mlx5_flow_destination *d2);
9c26f5f8 321static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
74491de9
MB
322static struct mlx5_flow_rule *
323find_flow_rule(struct fs_fte *fte,
324 struct mlx5_flow_destination *dest);
de8575e0
MG
325
326static void tree_init_node(struct fs_node *node,
bd71b08e
MG
327 void (*del_hw_func)(struct fs_node *),
328 void (*del_sw_func)(struct fs_node *))
de8575e0 329{
dd8e1945 330 refcount_set(&node->refcount, 1);
de8575e0
MG
331 INIT_LIST_HEAD(&node->list);
332 INIT_LIST_HEAD(&node->children);
c7784b1c 333 init_rwsem(&node->lock);
bd71b08e
MG
334 node->del_hw_func = del_hw_func;
335 node->del_sw_func = del_sw_func;
19f100fe 336 node->active = false;
de8575e0
MG
337}
338
339static void tree_add_node(struct fs_node *node, struct fs_node *parent)
340{
341 if (parent)
dd8e1945 342 refcount_inc(&parent->refcount);
de8575e0
MG
343 node->parent = parent;
344
345 /* Parent is the root */
346 if (!parent)
347 node->root = node;
348 else
349 node->root = parent->root;
350}
351
bd71b08e 352static int tree_get_node(struct fs_node *node)
de8575e0 353{
dd8e1945 354 return refcount_inc_not_zero(&node->refcount);
de8575e0
MG
355}
356
bd71b08e
MG
357static void nested_down_read_ref_node(struct fs_node *node,
358 enum fs_i_lock_class class)
de8575e0
MG
359{
360 if (node) {
bd71b08e 361 down_read_nested(&node->lock, class);
dd8e1945 362 refcount_inc(&node->refcount);
de8575e0
MG
363 }
364}
365
bd71b08e
MG
366static void nested_down_write_ref_node(struct fs_node *node,
367 enum fs_i_lock_class class)
de8575e0
MG
368{
369 if (node) {
bd71b08e 370 down_write_nested(&node->lock, class);
dd8e1945 371 refcount_inc(&node->refcount);
de8575e0
MG
372 }
373}
374
476d61b7 375static void down_write_ref_node(struct fs_node *node, bool locked)
de8575e0
MG
376{
377 if (node) {
476d61b7
EB
378 if (!locked)
379 down_write(&node->lock);
dd8e1945 380 refcount_inc(&node->refcount);
de8575e0
MG
381 }
382}
383
bd71b08e
MG
384static void up_read_ref_node(struct fs_node *node)
385{
dd8e1945 386 refcount_dec(&node->refcount);
bd71b08e
MG
387 up_read(&node->lock);
388}
389
476d61b7 390static void up_write_ref_node(struct fs_node *node, bool locked)
bd71b08e 391{
dd8e1945 392 refcount_dec(&node->refcount);
476d61b7
EB
393 if (!locked)
394 up_write(&node->lock);
bd71b08e
MG
395}
396
476d61b7 397static void tree_put_node(struct fs_node *node, bool locked)
de8575e0
MG
398{
399 struct fs_node *parent_node = node->parent;
400
dd8e1945 401 if (refcount_dec_and_test(&node->refcount)) {
bd71b08e
MG
402 if (node->del_hw_func)
403 node->del_hw_func(node);
404 if (parent_node) {
476d61b7 405 down_write_ref_node(parent_node, locked);
de8575e0 406 list_del_init(&node->list);
bd71b08e 407 }
6eb7a268
RD
408 node->del_sw_func(node);
409 if (parent_node)
410 up_write_ref_node(parent_node, locked);
de8575e0
MG
411 node = NULL;
412 }
de8575e0 413 if (!node && parent_node)
476d61b7 414 tree_put_node(parent_node, locked);
de8575e0
MG
415}
416
476d61b7 417static int tree_remove_node(struct fs_node *node, bool locked)
de8575e0 418{
dd8e1945
ER
419 if (refcount_read(&node->refcount) > 1) {
420 refcount_dec(&node->refcount);
b3638e1a
MG
421 return -EEXIST;
422 }
476d61b7 423 tree_put_node(node, locked);
de8575e0
MG
424 return 0;
425}
5e1626c0
MG
426
427static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
428 unsigned int prio)
429{
430 struct fs_prio *iter_prio;
431
432 fs_for_each_prio(iter_prio, ns) {
433 if (iter_prio->prio == prio)
434 return iter_prio;
435 }
436
437 return NULL;
438}
439
9254f8ed
MG
440static bool is_fwd_next_action(u32 action)
441{
442 return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
443 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
444}
445
a30c8b90
MB
446static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
447{
448 return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
449 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
450 type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
451 type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
452 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
38bf24c3
YK
453 type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
454 type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
a30c8b90
MB
455}
456
693c6883 457static bool check_valid_spec(const struct mlx5_flow_spec *spec)
5e1626c0 458{
693c6883
MB
459 int i;
460
693c6883
MB
461 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
462 if (spec->match_value[i] & ~spec->match_criteria[i]) {
463 pr_warn("mlx5_core: match_value differs from match_criteria\n");
464 return false;
465 }
466
2aada6c0 467 return true;
5e1626c0 468}
0c56b975 469
db202995 470struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
0c56b975
MG
471{
472 struct fs_node *root;
473 struct mlx5_flow_namespace *ns;
474
475 root = node->root;
476
477 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
478 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
479 return NULL;
480 }
481
482 ns = container_of(root, struct mlx5_flow_namespace, node);
483 return container_of(ns, struct mlx5_flow_root_namespace, ns);
484}
485
a369d4ac
MG
486static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
487{
488 struct mlx5_flow_root_namespace *root = find_root(node);
489
490 if (root)
491 return root->dev->priv.steering;
492 return NULL;
493}
494
0c56b975
MG
495static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
496{
497 struct mlx5_flow_root_namespace *root = find_root(node);
498
499 if (root)
500 return root->dev;
501 return NULL;
502}
503
139ed6c6
MG
/* SW destructor for a namespace node — it owns no HW state. */
static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}
508
/* SW destructor for a priority node — it owns no HW state. */
static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}
513
bd71b08e 514static void del_hw_flow_table(struct fs_node *node)
0c56b975 515{
af76c501 516 struct mlx5_flow_root_namespace *root;
0c56b975
MG
517 struct mlx5_flow_table *ft;
518 struct mlx5_core_dev *dev;
0c56b975
MG
519 int err;
520
521 fs_get_obj(ft, node);
522 dev = get_dev(&ft->node);
af76c501 523 root = find_root(&ft->node);
8e4ca986 524 trace_mlx5_fs_del_ft(ft);
0c56b975 525
19f100fe 526 if (node->active) {
ae288a48 527 err = root->cmds->destroy_flow_table(root, ft);
19f100fe
MG
528 if (err)
529 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
530 }
bd71b08e
MG
531}
532
533static void del_sw_flow_table(struct fs_node *node)
534{
535 struct mlx5_flow_table *ft;
536 struct fs_prio *prio;
537
538 fs_get_obj(ft, node);
539
693c6883 540 rhltable_destroy(&ft->fgs_hash);
aee37f3d
RD
541 if (ft->node.parent) {
542 fs_get_obj(prio, ft->node.parent);
543 prio->num_ft--;
544 }
a369d4ac 545 kfree(ft);
0c56b975
MG
546}
547
e7aafc8f 548static void modify_fte(struct fs_fte *fte)
0c56b975 549{
af76c501 550 struct mlx5_flow_root_namespace *root;
0c56b975
MG
551 struct mlx5_flow_table *ft;
552 struct mlx5_flow_group *fg;
e7aafc8f 553 struct mlx5_core_dev *dev;
0c56b975
MG
554 int err;
555
0c56b975 556 fs_get_obj(fg, fte->node.parent);
0c56b975 557 fs_get_obj(ft, fg->node.parent);
e7aafc8f
EB
558 dev = get_dev(&fte->node);
559
560 root = find_root(&ft->node);
ae288a48 561 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
e7aafc8f
EB
562 if (err)
563 mlx5_core_warn(dev,
564 "%s can't del rule fg id=%d fte_index=%d\n",
565 __func__, fg->id, fte->index);
566 fte->modify_mask = 0;
567}
568
569static void del_sw_hw_rule(struct fs_node *node)
570{
571 struct mlx5_flow_rule *rule;
572 struct fs_fte *fte;
573
574 fs_get_obj(rule, node);
575 fs_get_obj(fte, rule->node.parent);
4c03e69a 576 trace_mlx5_fs_del_rule(rule);
9254f8ed 577 if (is_fwd_next_action(rule->sw_action)) {
b3638e1a
MG
578 mutex_lock(&rule->dest_attr.ft->lock);
579 list_del(&rule->next_ft);
580 mutex_unlock(&rule->dest_attr.ft->lock);
581 }
ae058314 582
d49d6307
MB
583 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
584 --fte->dests_size;
e7aafc8f
EB
585 fte->modify_mask |=
586 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
587 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
d2ec6a35 588 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
ae058314
MB
589 goto out;
590 }
591
d49d6307
MB
592 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
593 --fte->dests_size;
8cbcc5ef
MG
594 fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
595 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
596 goto out;
597 }
598
a30c8b90 599 if (is_fwd_dest_type(rule->dest_attr.type)) {
d49d6307 600 --fte->dests_size;
a30c8b90
MB
601 --fte->fwd_dests;
602
603 if (!fte->fwd_dests)
604 fte->action.action &=
605 ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
e7aafc8f
EB
606 fte->modify_mask |=
607 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
c3ae3a9c 608 goto out;
ae058314
MB
609 }
610out:
a369d4ac 611 kfree(rule);
0c56b975
MG
612}
613
bd71b08e 614static void del_hw_fte(struct fs_node *node)
0c56b975 615{
af76c501 616 struct mlx5_flow_root_namespace *root;
0c56b975
MG
617 struct mlx5_flow_table *ft;
618 struct mlx5_flow_group *fg;
619 struct mlx5_core_dev *dev;
620 struct fs_fte *fte;
621 int err;
622
623 fs_get_obj(fte, node);
624 fs_get_obj(fg, fte->node.parent);
625 fs_get_obj(ft, fg->node.parent);
626
bd71b08e 627 trace_mlx5_fs_del_fte(fte);
3a09fae0 628 WARN_ON(fte->dests_size);
0c56b975 629 dev = get_dev(&ft->node);
af76c501 630 root = find_root(&ft->node);
19f100fe 631 if (node->active) {
ae288a48 632 err = root->cmds->delete_fte(root, ft, fte);
19f100fe
MG
633 if (err)
634 mlx5_core_warn(dev,
635 "flow steering can't delete fte in index %d of flow group id %d\n",
636 fte->index, fg->id);
79398543 637 node->active = false;
19f100fe 638 }
bd71b08e
MG
639}
640
641static void del_sw_fte(struct fs_node *node)
642{
1f0593e7 643 struct mlx5_flow_steering *steering = get_steering(node);
bd71b08e
MG
644 struct mlx5_flow_group *fg;
645 struct fs_fte *fte;
646 int err;
647
648 fs_get_obj(fte, node);
649 fs_get_obj(fg, fte->node.parent);
0c56b975 650
19f100fe
MG
651 err = rhashtable_remove_fast(&fg->ftes_hash,
652 &fte->hash,
653 rhash_fte);
654 WARN_ON(err);
8802b8a4 655 ida_free(&fg->fte_allocator, fte->index - fg->start_index);
1f0593e7 656 kmem_cache_free(steering->ftes_cache, fte);
0c56b975
MG
657}
658
bd71b08e 659static void del_hw_flow_group(struct fs_node *node)
0c56b975 660{
af76c501 661 struct mlx5_flow_root_namespace *root;
0c56b975
MG
662 struct mlx5_flow_group *fg;
663 struct mlx5_flow_table *ft;
664 struct mlx5_core_dev *dev;
665
666 fs_get_obj(fg, node);
667 fs_get_obj(ft, fg->node.parent);
668 dev = get_dev(&ft->node);
4c03e69a 669 trace_mlx5_fs_del_fg(fg);
0c56b975 670
af76c501 671 root = find_root(&ft->node);
ae288a48 672 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
bd71b08e
MG
673 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
674 fg->id, ft->id);
675}
676
677static void del_sw_flow_group(struct fs_node *node)
678{
a369d4ac 679 struct mlx5_flow_steering *steering = get_steering(node);
bd71b08e
MG
680 struct mlx5_flow_group *fg;
681 struct mlx5_flow_table *ft;
682 int err;
683
684 fs_get_obj(fg, node);
685 fs_get_obj(ft, fg->node.parent);
32dba76a 686
0d235c3f 687 rhashtable_destroy(&fg->ftes_hash);
75d1d187 688 ida_destroy(&fg->fte_allocator);
79cdb0aa
PB
689 if (ft->autogroup.active &&
690 fg->max_ftes == ft->autogroup.group_size &&
691 fg->start_index < ft->autogroup.max_fte)
bd71b08e 692 ft->autogroup.num_groups--;
693c6883
MB
693 err = rhltable_remove(&ft->fgs_hash,
694 &fg->hash,
695 rhash_fg);
696 WARN_ON(err);
a369d4ac 697 kmem_cache_free(steering->fgs_cache, fg);
0c56b975
MG
698}
699
f5c2ff17
MG
700static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
701{
702 int index;
703 int ret;
704
8802b8a4 705 index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
f5c2ff17
MG
706 if (index < 0)
707 return index;
708
709 fte->index = index + fg->start_index;
710 ret = rhashtable_insert_fast(&fg->ftes_hash,
711 &fte->hash,
712 rhash_fte);
713 if (ret)
714 goto err_ida_remove;
715
716 tree_add_node(&fte->node, &fg->node);
717 list_add_tail(&fte->node.list, &fg->node.children);
718 return 0;
719
720err_ida_remove:
8802b8a4 721 ida_free(&fg->fte_allocator, index);
f5c2ff17
MG
722 return ret;
723}
724
a369d4ac 725static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
5233794b 726 const struct mlx5_flow_spec *spec,
f5c2ff17 727 struct mlx5_flow_act *flow_act)
0c56b975 728{
a369d4ac 729 struct mlx5_flow_steering *steering = get_steering(&ft->node);
0c56b975
MG
730 struct fs_fte *fte;
731
a369d4ac 732 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
0c56b975
MG
733 if (!fte)
734 return ERR_PTR(-ENOMEM);
735
bb0ee7dc 736 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
0c56b975 737 fte->node.type = FS_TYPE_FLOW_ENTRY;
d2ec6a35 738 fte->action = *flow_act;
bb0ee7dc 739 fte->flow_context = spec->flow_context;
0c56b975 740
cefc2355 741 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
19f100fe
MG
742
743 return fte;
19f100fe
MG
744}
745
a369d4ac
MG
746static void dealloc_flow_group(struct mlx5_flow_steering *steering,
747 struct mlx5_flow_group *fg)
19f100fe
MG
748{
749 rhashtable_destroy(&fg->ftes_hash);
a369d4ac 750 kmem_cache_free(steering->fgs_cache, fg);
19f100fe
MG
751}
752
a369d4ac
MG
753static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
754 u8 match_criteria_enable,
5233794b 755 const void *match_criteria,
19f100fe
MG
756 int start_index,
757 int end_index)
0c56b975
MG
758{
759 struct mlx5_flow_group *fg;
0d235c3f
MB
760 int ret;
761
a369d4ac 762 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
0c56b975
MG
763 if (!fg)
764 return ERR_PTR(-ENOMEM);
765
0d235c3f
MB
766 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
767 if (ret) {
a369d4ac 768 kmem_cache_free(steering->fgs_cache, fg);
0d235c3f 769 return ERR_PTR(ret);
fc9c5a4a
TZ
770 }
771
75d1d187 772 ida_init(&fg->fte_allocator);
0c56b975
MG
773 fg->mask.match_criteria_enable = match_criteria_enable;
774 memcpy(&fg->mask.match_criteria, match_criteria,
775 sizeof(fg->mask.match_criteria));
776 fg->node.type = FS_TYPE_FLOW_GROUP;
19f100fe
MG
777 fg->start_index = start_index;
778 fg->max_ftes = end_index - start_index + 1;
779
780 return fg;
781}
782
783static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
784 u8 match_criteria_enable,
5233794b 785 const void *match_criteria,
19f100fe
MG
786 int start_index,
787 int end_index,
788 struct list_head *prev)
789{
a369d4ac 790 struct mlx5_flow_steering *steering = get_steering(&ft->node);
19f100fe
MG
791 struct mlx5_flow_group *fg;
792 int ret;
793
a369d4ac 794 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
19f100fe
MG
795 start_index, end_index);
796 if (IS_ERR(fg))
797 return fg;
798
799 /* initialize refcnt, add to parent list */
800 ret = rhltable_insert(&ft->fgs_hash,
801 &fg->hash,
802 rhash_fg);
803 if (ret) {
a369d4ac 804 dealloc_flow_group(steering, fg);
19f100fe
MG
805 return ERR_PTR(ret);
806 }
807
bd71b08e 808 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
19f100fe
MG
809 tree_add_node(&fg->node, &ft->node);
810 /* Add node to group list */
811 list_add(&fg->node.list, prev);
bd71b08e 812 atomic_inc(&ft->node.version);
19f100fe 813
0c56b975
MG
814 return fg;
815}
816
04745afb 817static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
aaff1bea 818 enum fs_flow_table_type table_type,
c9f1b073
HHZ
819 enum fs_flow_table_op_mod op_mod,
820 u32 flags)
0c56b975
MG
821{
822 struct mlx5_flow_table *ft;
693c6883 823 int ret;
0c56b975
MG
824
825 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
826 if (!ft)
693c6883
MB
827 return ERR_PTR(-ENOMEM);
828
829 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
830 if (ret) {
831 kfree(ft);
832 return ERR_PTR(ret);
833 }
0c56b975
MG
834
835 ft->level = level;
836 ft->node.type = FS_TYPE_FLOW_TABLE;
aaff1bea 837 ft->op_mod = op_mod;
0c56b975 838 ft->type = table_type;
efdc810b 839 ft->vport = vport;
c9f1b073 840 ft->flags = flags;
b3638e1a
MG
841 INIT_LIST_HEAD(&ft->fwd_rules);
842 mutex_init(&ft->lock);
0c56b975
MG
843
844 return ft;
845}
846
fdb6896f
MG
847/* If reverse is false, then we search for the first flow table in the
848 * root sub-tree from start(closest from right), else we search for the
849 * last flow table in the root sub-tree till start(closest from left).
850 */
851static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
852 struct list_head *start,
853 bool reverse)
854{
855#define list_advance_entry(pos, reverse) \
856 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
857
858#define list_for_each_advance_continue(pos, head, reverse) \
859 for (pos = list_advance_entry(pos, reverse); \
860 &pos->list != (head); \
861 pos = list_advance_entry(pos, reverse))
862
863 struct fs_node *iter = list_entry(start, struct fs_node, list);
864 struct mlx5_flow_table *ft = NULL;
865
328edb49 866 if (!root || root->type == FS_TYPE_PRIO_CHAINS)
fdb6896f
MG
867 return NULL;
868
869 list_for_each_advance_continue(iter, &root->children, reverse) {
870 if (iter->type == FS_TYPE_FLOW_TABLE) {
871 fs_get_obj(ft, iter);
872 return ft;
873 }
874 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
875 if (ft)
876 return ft;
877 }
878
879 return ft;
880}
881
59f8f7c8 882/* If reverse is false then return the first flow table in next priority of
fdb6896f
MG
883 * prio in the tree, else return the last flow table in the previous priority
884 * of prio in the tree.
885 */
886static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
887{
888 struct mlx5_flow_table *ft = NULL;
889 struct fs_node *curr_node;
890 struct fs_node *parent;
891
892 parent = prio->node.parent;
893 curr_node = &prio->node;
894 while (!ft && parent) {
895 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
896 curr_node = parent;
897 parent = curr_node->parent;
898 }
899 return ft;
900}
901
902/* Assuming all the tree is locked by mutex chain lock */
903static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
904{
905 return find_closest_ft(prio, false);
906}
907
908/* Assuming all the tree is locked by mutex chain lock */
909static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
910{
911 return find_closest_ft(prio, true);
912}
913
9254f8ed
MG
914static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
915 struct mlx5_flow_act *flow_act)
916{
9254f8ed 917 struct fs_prio *prio;
59f8f7c8 918 bool next_ns;
9254f8ed 919
59f8f7c8
MG
920 next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
921 fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
9254f8ed 922
59f8f7c8 923 return find_next_chained_ft(prio);
9254f8ed
MG
924}
925
f90edfd2
MG
926static int connect_fts_in_prio(struct mlx5_core_dev *dev,
927 struct fs_prio *prio,
928 struct mlx5_flow_table *ft)
929{
af76c501 930 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
f90edfd2 931 struct mlx5_flow_table *iter;
f90edfd2
MG
932 int err;
933
934 fs_for_each_ft(iter, prio) {
ae288a48 935 err = root->cmds->modify_flow_table(root, iter, ft);
f90edfd2 936 if (err) {
6c4e9bcf
LR
937 mlx5_core_err(dev,
938 "Failed to modify flow table id %d, type %d, err %d\n",
939 iter->id, iter->type, err);
f90edfd2 940 /* The driver is out of sync with the FW */
f90edfd2
MG
941 return err;
942 }
943 }
944 return 0;
945}
946
947/* Connect flow tables from previous priority of prio to ft */
948static int connect_prev_fts(struct mlx5_core_dev *dev,
949 struct mlx5_flow_table *ft,
950 struct fs_prio *prio)
951{
952 struct mlx5_flow_table *prev_ft;
953
954 prev_ft = find_prev_chained_ft(prio);
955 if (prev_ft) {
956 struct fs_prio *prev_prio;
957
958 fs_get_obj(prev_prio, prev_ft->node.parent);
959 return connect_fts_in_prio(dev, prev_prio, ft);
960 }
961 return 0;
962}
963
/* Make @ft the namespace's root flow table if its level is lower than the
 * current root's.  The FW must be told about the new root once per underlay
 * QP (RoCE/IB), or once with QPN 0 when no underlay QPs are registered.
 */
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	/* Only a table at a strictly lower level can displace the root. */
	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		/* Update the root for each registered underlay QP;
		 * stop on the first failure.
		 */
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}
1002
/* Rewrite a single rule's destination to @dest and push the change to FW.
 * Only valid for rules whose FTE forwards (FWD_DEST); returns -EINVAL
 * otherwise.  The FTE node is write-locked for the duration of the update.
 */
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	/* Update the SW copy first, then ask FW to refresh only the
	 * destination list of this FTE.
	 */
	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}
1028
/* Replace a destination of @handle with @new_dest.
 *
 * When @old_dest is NULL the handle must hold exactly one rule, which is
 * rewritten unconditionally.  Otherwise the handle's rules are scanned for
 * a matching destination and that rule is rewritten.
 *
 * Returns 0 on success, -EINVAL when no rule qualifies.
 */
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		/* NOTE(review): the comparison is against new_dest, not
		 * old_dest; one would expect the rule currently pointing at
		 * old_dest to be the one rewritten — confirm intended
		 * semantics with callers passing a non-NULL old_dest.
		 */
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}
1050
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft.
 * The fwd_rules list is moved from the old table to the new one under the
 * old table's lock; FW updates afterwards are best-effort (errors are only
 * logged, the function still returns 0).
 */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		/* FWD_NEXT_NS rules that would still land in the same
		 * namespace need no FW update.
		 */
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}
1084
/* Wire a freshly created @ft into the steering chain of @prio: connect the
 * previous priority's tables and any forwarding rules to it when it becomes
 * the first (lowest-level) table of the priority, then promote it to the
 * namespace root if the device supports modifying the root.
 */
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		/* ft becomes the first table of this priority. */
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}
1111
d63cd286
MG
1112static void list_add_flow_table(struct mlx5_flow_table *ft,
1113 struct fs_prio *prio)
1114{
1115 struct list_head *prev = &prio->node.children;
1116 struct mlx5_flow_table *iter;
1117
1118 fs_for_each_ft(iter, prio) {
1119 if (iter->level > ft->level)
1120 break;
1121 prev = &iter->node.list;
1122 }
1123 list_add(&ft->node.list, prev);
1124}
1125
/* Common worker for flow table creation.  Allocates the SW object, creates
 * the table in FW and (for managed tables) links it into the priority's
 * chain and the steering tree.  Runs under the root chain lock so chaining
 * stays consistent.  Returns the table or an ERR_PTR.
 */
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		/* Translate the priority-relative level to an absolute one. */
		ft_attr->level += fs_prio->start_level;
	}

	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	/* Unmanaged tables are chained explicitly by the caller. */
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		/* Keep only the root pointer; the table is not listed under
		 * the priority.
		 */
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
1210
/* Create a managed flow table in namespace @ns (vport 0, normal op mode). */
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);
efdc810b 1217
/* Return the FW-assigned id of @ft. */
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
{
	return ft->id;
}
EXPORT_SYMBOL(mlx5_flow_table_id);
1223
/* Create a managed flow table bound to a specific @vport. */
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}
1230
b3ba5149
ES
1231struct mlx5_flow_table*
1232mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1233 int prio, u32 level)
aaff1bea 1234{
b3ba5149
ES
1235 struct mlx5_flow_table_attr ft_attr = {};
1236
1237 ft_attr.level = level;
1238 ft_attr.prio = prio;
4a98544d
PB
1239 ft_attr.max_fte = 1;
1240
b3ba5149 1241 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
aaff1bea
AH
1242}
1243EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1244
71513c05 1245#define MAX_FLOW_GROUP_SIZE BIT(24)
b3ba5149
ES
1246struct mlx5_flow_table*
1247mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
61dc7b01 1248 struct mlx5_flow_table_attr *ft_attr)
f0d22d18 1249{
79cdb0aa 1250 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
79cdb0aa 1251 int max_num_groups = ft_attr->autogroup.max_num_groups;
f0d22d18 1252 struct mlx5_flow_table *ft;
4a98544d 1253 int autogroups_max_fte;
f0d22d18 1254
61dc7b01 1255 ft = mlx5_create_flow_table(ns, ft_attr);
f0d22d18
MG
1256 if (IS_ERR(ft))
1257 return ft;
1258
4a98544d
PB
1259 autogroups_max_fte = ft->max_fte - num_reserved_entries;
1260 if (max_num_groups > autogroups_max_fte)
1261 goto err_validate;
1262 if (num_reserved_entries > ft->max_fte)
1263 goto err_validate;
1264
71513c05
PB
1265 /* Align the number of groups according to the largest group size */
1266 if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1267 max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1268
f0d22d18 1269 ft->autogroup.active = true;
79cdb0aa
PB
1270 ft->autogroup.required_groups = max_num_groups;
1271 ft->autogroup.max_fte = autogroups_max_fte;
97fd8da2 1272 /* We save place for flow groups in addition to max types */
79cdb0aa 1273 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
f0d22d18
MG
1274
1275 return ft;
4a98544d
PB
1276
1277err_validate:
1278 mlx5_destroy_flow_table(ft);
1279 return ERR_PTR(-ENOSPC);
f0d22d18 1280}
b217ea25 1281EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
f0d22d18 1282
/* Create a flow group in @ft from a caller-built create_flow_group_in
 * command buffer.  Forbidden on the autogrouped range of an autogroup
 * table.  The group is inserted into the table's SW structures under the
 * table write lock, then created in FW; on FW failure the SW node is
 * released again.
 */
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	/* Manual groups may only live in the reserved (non-autogrouped)
	 * tail of an autogroup table.
	 */
	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);
0c56b975
MG
1321
1322static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1323{
1324 struct mlx5_flow_rule *rule;
1325
1326 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1327 if (!rule)
1328 return NULL;
1329
b3638e1a 1330 INIT_LIST_HEAD(&rule->next_ft);
0c56b975 1331 rule->node.type = FS_TYPE_FLOW_DEST;
60ab4584
AV
1332 if (dest)
1333 memcpy(&rule->dest_attr, dest, sizeof(*dest));
6510bc0d
MB
1334 else
1335 rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
0c56b975
MG
1336
1337 return rule;
1338}
1339
74491de9
MB
1340static struct mlx5_flow_handle *alloc_handle(int num_rules)
1341{
1342 struct mlx5_flow_handle *handle;
1343
acafe7e3 1344 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
74491de9
MB
1345 if (!handle)
1346 return NULL;
1347
1348 handle->num_rules = num_rules;
1349
1350 return handle;
1351}
1352
1353static void destroy_flow_handle(struct fs_fte *fte,
1354 struct mlx5_flow_handle *handle,
1355 struct mlx5_flow_destination *dest,
1356 int i)
1357{
1358 for (; --i >= 0;) {
dd8e1945 1359 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
74491de9
MB
1360 fte->dests_size--;
1361 list_del(&handle->rule[i]->node.list);
1362 kfree(handle->rule[i]);
1363 }
1364 }
1365 kfree(handle);
1366}
1367
/* Build a flow handle for @fte covering @dest_num destinations (or one rule
 * with no destination when @dest_num is 0).  Existing rules with an equal
 * destination are reused (refcount bumped); otherwise new rules are
 * allocated, linked under the FTE and reflected in *modify_mask / *new_rule
 * so the caller knows a FW update is needed.
 */
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	/* Pre-computed modify-mask bits for counter vs. destination rules. */
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	/* do/while so a single rule is created even when dest_num == 0. */
	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			if (is_fwd_dest_type(dest[i].type))
				fte->fwd_dests++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	/* Unwind the i rules already attached to the handle. */
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}
1429
/* fte should not be deleted while calling this function */
/* Attach @dest_num destinations to @fte and sync the FTE to FW: a create
 * for a new FTE, or an update (destinations and, when @update_action,
 * action bits) for an existing one.  When no new SW rule was needed the
 * existing handle is returned without touching FW.
 */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	/* Bump the group version so concurrent adders re-validate. */
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}
1473
/* Find a free index range in an autogroup table and insert a new flow group
 * matching @spec's criteria there.  Group size shrinks to 1 once the
 * required number of groups exists.  Returns the group, -ENOENT when the
 * table is not autogrouped, or -ENOSPC when no contiguous range is free.
 */
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		/* Slide the candidate range past each group it overlaps. */
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	/* Only full-sized groups count towards required_groups. */
	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}
1520
/* Create an (already SW-allocated) autogroup flow group in FW by building
 * the create_flow_group_in command from the group's cached mask and index
 * range.  Marks the group active on success.
 */
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	/* Tell FW whether matching includes the source eswitch owner vhca
	 * id, derived from the group's match criteria.
	 */
	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}
1563
814fb875
MB
1564static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1565 struct mlx5_flow_destination *d2)
1566{
1567 if (d1->type == d2->type) {
e3a0f40b
YK
1568 if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1569 d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1228e912
EB
1570 d1->vport.num == d2->vport.num &&
1571 d1->vport.flags == d2->vport.flags &&
c979c445
DL
1572 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1573 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1228e912 1574 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
2b688ea5
MG
1575 (d1->vport.pkt_reformat->id ==
1576 d2->vport.pkt_reformat->id) : true)) ||
814fb875
MB
1577 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1578 d1->ft == d2->ft) ||
1579 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
664000b6
YH
1580 d1->tir_num == d2->tir_num) ||
1581 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
6f851556
YK
1582 d1->ft_num == d2->ft_num) ||
1583 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
38bf24c3
YK
1584 d1->sampler_id == d2->sampler_id) ||
1585 (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
1586 d1->range.field == d2->range.field &&
1587 d1->range.hit_ft == d2->range.hit_ft &&
1588 d1->range.miss_ft == d2->range.miss_ft &&
1589 d1->range.min == d2->range.min &&
1590 d1->range.max == d2->range.max))
814fb875
MB
1591 return true;
1592 }
1593
1594 return false;
1595}
1596
b3638e1a
MG
1597static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1598 struct mlx5_flow_destination *dest)
1599{
1600 struct mlx5_flow_rule *rule;
1601
1602 list_for_each_entry(rule, &fte->node.children, node.list) {
814fb875
MB
1603 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1604 return rule;
b3638e1a
MG
1605 }
1606 return NULL;
1607}
1608
8fa5e7b2
MB
1609static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1610 const struct mlx5_fs_vlan *vlan1)
0d235c3f 1611{
8fa5e7b2
MB
1612 return vlan0->ethtype != vlan1->ethtype ||
1613 vlan0->vid != vlan1->vid ||
1614 vlan0->prio != vlan1->prio;
1615}
1616
1617static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1618 const struct mlx5_flow_act *act2)
1619{
1620 u32 action1 = act1->action;
1621 u32 action2 = act2->action;
1622 u32 xored_actions;
1623
1624 xored_actions = action1 ^ action2;
0d235c3f
MB
1625
1626 /* if one rule only wants to count, it's ok */
1627 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1628 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1629 return false;
1630
1631 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
60786f09 1632 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
96de67a7 1633 MLX5_FLOW_CONTEXT_ACTION_DECAP |
0c06897a
OG
1634 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1635 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
8da6fe2a
JL
1636 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1637 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1638 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
0d235c3f
MB
1639 return true;
1640
8fa5e7b2
MB
1641 if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1642 act1->pkt_reformat != act2->pkt_reformat)
1643 return true;
1644
1645 if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1646 act1->modify_hdr != act2->modify_hdr)
1647 return true;
1648
1649 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1650 check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1651 return true;
1652
1653 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1654 check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1655 return true;
1656
0d235c3f
MB
1657 return false;
1658}
1659
bb0ee7dc
JL
1660static int check_conflicting_ftes(struct fs_fte *fte,
1661 const struct mlx5_flow_context *flow_context,
1662 const struct mlx5_flow_act *flow_act)
0d235c3f 1663{
8fa5e7b2 1664 if (check_conflicting_actions(flow_act, &fte->action)) {
0d235c3f
MB
1665 mlx5_core_warn(get_dev(&fte->node),
1666 "Found two FTEs with conflicting actions\n");
1667 return -EEXIST;
1668 }
1669
bb0ee7dc
JL
1670 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1671 fte->flow_context.flow_tag != flow_context->flow_tag) {
0d235c3f
MB
1672 mlx5_core_warn(get_dev(&fte->node),
1673 "FTE flow tag %u already exists with different flow tag %u\n",
bb0ee7dc
JL
1674 fte->flow_context.flow_tag,
1675 flow_context->flow_tag);
0d235c3f
MB
1676 return -EEXIST;
1677 }
1678
1679 return 0;
1680}
1681
/* Add a rule to an existing FTE of group @fg after conflict checking.
 * The FTE's action bits are OR-merged with the new rule's; on FW failure
 * the previous action bits are restored.  Newly created rules (refcount 1)
 * are linked into the tree and traced.
 */
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	/* update_action is set when the merged bits differ from the new
	 * rule's bits, i.e. the FTE already carried extra actions.
	 */
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		/* refcount == 1 means the rule was created by this call. */
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}
1716
171c7625 1717static bool counter_is_valid(u32 action)
bd5251db 1718{
ae058314 1719 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
4c2573e1 1720 MLX5_FLOW_CONTEXT_ACTION_ALLOW |
eafa6abd 1721 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
bd5251db
AV
1722}
1723
/* Validate a rule destination against the table it is added to: counters
 * need a compatible action, and forwarding to another flow table must go
 * to a deeper level unless FLOW_ACT_IGNORE_FLOW_LEVEL is set (which is only
 * honoured for FDB/NIC_RX tables of matching type).
 */
static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	/* Non-forwarding actions need no destination validation. */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX)
			return false;

		/* NOTE(review): dest is dereferenced here without a NULL
		 * check; callers presumably never pass FWD_DEST with
		 * ignore_level and a NULL dest — confirm.
		 */
		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	/* FWD_DEST requires a destination; table targets must be deeper
	 * than the source table unless levels are ignored.
	 */
	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}
1753
/* List node tying a flow group into a candidate list built by
 * build_match_list(); each entry holds a tree reference on @g.
 */
struct match_list {
	struct list_head list;
	struct mlx5_flow_group *g;
};
1758
b820ce00 1759static void free_match_list(struct match_list *head, bool ft_locked)
46719d77 1760{
b820ce00 1761 struct match_list *iter, *match_tmp;
46719d77 1762
b820ce00
EC
1763 list_for_each_entry_safe(iter, match_tmp, &head->list,
1764 list) {
1765 tree_put_node(&iter->g->node, ft_locked);
1766 list_del(&iter->list);
1767 kfree(iter);
46719d77
MG
1768 }
1769}
1770
/* Collect into @match_head all flow groups of @ft whose match criteria
 * equal @spec's (optionally restricted to @fg).  Each collected group gets
 * a tree reference.  Runs the hash lookup under RCU, so allocations use
 * GFP_ATOMIC and no FW commands may be issued here.
 */
static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    struct mlx5_flow_group *fg,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		/* Caller may pin the search to one specific group. */
		if (fg && fg != g)
			continue;

		/* Skip groups that are concurrently being freed. */
		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			rcu_read_unlock();
			free_match_list(match_head, ft_locked);
			return -ENOMEM;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
	rcu_read_unlock();
	return err;
}
1807
bd71b08e
MG
1808static u64 matched_fgs_get_version(struct list_head *match_head)
1809{
1810 struct match_list *iter;
1811 u64 version = 0;
1812
1813 list_for_each_entry(iter, match_head, list)
1814 version += (u64)atomic_read(&iter->g->node.version);
1815 return version;
1816}
1817
/* Look up an active FTE with @match_value inside group @g.  The group is
 * held (read- or write-locked per @take_write) only for the hash lookup;
 * on success the FTE is returned referenced and write-locked (nested
 * child-lock class), ready for rule insertion.  Returns NULL when absent
 * or inactive.
 */
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	/* tree_get_node() fails when the FTE is being torn down. */
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}
1849
46719d77
MG
1850static struct mlx5_flow_handle *
1851try_add_to_existing_fg(struct mlx5_flow_table *ft,
bd71b08e 1852 struct list_head *match_head,
5233794b 1853 const struct mlx5_flow_spec *spec,
46719d77
MG
1854 struct mlx5_flow_act *flow_act,
1855 struct mlx5_flow_destination *dest,
bd71b08e
MG
1856 int dest_num,
1857 int ft_version)
46719d77 1858{
a369d4ac 1859 struct mlx5_flow_steering *steering = get_steering(&ft->node);
46719d77
MG
1860 struct mlx5_flow_group *g;
1861 struct mlx5_flow_handle *rule;
46719d77 1862 struct match_list *iter;
bd71b08e
MG
1863 bool take_write = false;
1864 struct fs_fte *fte;
dc638d11 1865 u64 version = 0;
f5c2ff17
MG
1866 int err;
1867
bb0ee7dc 1868 fte = alloc_fte(ft, spec, flow_act);
f5c2ff17
MG
1869 if (IS_ERR(fte))
1870 return ERR_PTR(-ENOMEM);
46719d77 1871
bd71b08e 1872search_again_locked:
d5634fee
PB
1873 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1874 goto skip_search;
dc638d11
EC
1875 version = matched_fgs_get_version(match_head);
1876 /* Try to find an fte with identical match value and attempt update its
1877 * action.
1878 */
bd71b08e
MG
1879 list_for_each_entry(iter, match_head, list) {
1880 struct fs_fte *fte_tmp;
693c6883
MB
1881
1882 g = iter->g;
ad9421e3
RD
1883 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1884 if (!fte_tmp)
bd71b08e 1885 continue;
bb0ee7dc 1886 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
a5bfe6b4 1887 /* No error check needed here, because insert_fte() is not called */
476d61b7
EB
1888 up_write_ref_node(&fte_tmp->node, false);
1889 tree_put_node(&fte_tmp->node, false);
a369d4ac 1890 kmem_cache_free(steering->ftes_cache, fte);
bd71b08e 1891 return rule;
693c6883
MB
1892 }
1893
d5634fee
PB
1894skip_search:
1895 /* No group with matching fte found, or we skipped the search.
1896 * Try to add a new fte to any matching fg.
1897 */
1898
bd71b08e
MG
1899 /* Check the ft version, for case that new flow group
1900 * was added while the fgs weren't locked
1901 */
1902 if (atomic_read(&ft->node.version) != ft_version) {
1903 rule = ERR_PTR(-EAGAIN);
1904 goto out;
1905 }
b92af5a7 1906
dc638d11
EC
1907 /* Check the fgs version. If version have changed it could be that an
1908 * FTE with the same match value was added while the fgs weren't
1909 * locked.
bd71b08e 1910 */
dc638d11
EC
1911 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1912 version != matched_fgs_get_version(match_head)) {
ad9421e3 1913 take_write = true;
bd71b08e 1914 goto search_again_locked;
ad9421e3 1915 }
bd71b08e
MG
1916
1917 list_for_each_entry(iter, match_head, list) {
1918 g = iter->g;
1919
ad9421e3
RD
1920 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1921
49c0355d
PB
1922 if (!g->node.active) {
1923 up_write_ref_node(&g->node, false);
1924 continue;
1925 }
1926
f5c2ff17
MG
1927 err = insert_fte(g, fte);
1928 if (err) {
476d61b7 1929 up_write_ref_node(&g->node, false);
f5c2ff17 1930 if (err == -ENOSPC)
bd71b08e 1931 continue;
a369d4ac 1932 kmem_cache_free(steering->ftes_cache, fte);
f5c2ff17 1933 return ERR_PTR(err);
bd71b08e 1934 }
693c6883 1935
bd71b08e 1936 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
476d61b7 1937 up_write_ref_node(&g->node, false);
bb0ee7dc 1938 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
476d61b7 1939 up_write_ref_node(&fte->node, false);
a5bfe6b4
MG
1940 if (IS_ERR(rule))
1941 tree_put_node(&fte->node, false);
bd71b08e
MG
1942 return rule;
1943 }
1944 rule = ERR_PTR(-ENOENT);
1945out:
a369d4ac 1946 kmem_cache_free(steering->ftes_cache, fte);
693c6883
MB
1947 return rule;
1948}
1949
74491de9
MB
/* Core rule-insertion path. Validates @spec and destinations, then tries to
 * add the rule to an existing flow group; if that fails with -ENOENT/-EAGAIN,
 * retakes the table write lock and auto-creates a new group + FTE.
 * Called with no locks held; manages ft/group/FTE ref-node locks internally.
 */
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)

{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	/* A caller-supplied group is incompatible with autogrouping. */
	if (flow_act->fg && ft->autogroup.active)
		return ERR_PTR(-EINVAL);

	if (dest && dest_num <= 0)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	/* Success, or any error other than "retry" / "no group found":
	 * return as-is.
	 */
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	/* Upgrade to a table write lock before creating a new group. */
	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}
b3638e1a
MG
2058
2059static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2060{
2061 return ((ft->type == FS_FT_NIC_RX) &&
2062 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2063}
2064
74491de9
MB
/* Public entry point for adding flow rules. For plain actions this is a thin
 * wrapper around _mlx5_add_flow_rules(). For FWD_NEXT_PRIO/FWD_NEXT_NS
 * actions it rewrites the action into an explicit FWD_DEST to the next flow
 * table: a destination array one entry larger is built, and the new rule is
 * linked onto next_ft->fwd_rules so it can be re-pointed if that table goes
 * away. root->chain_lock is held across the lookup + insert to keep the
 * chain stable.
 */
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;	/* original action, restored on the rule */
	int i;

	/* NULL spec means "match everything". */
	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	/* Copy the caller's destinations and append next_ft as the last one. */
	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
0c56b975 2130
/* Delete all rules carried by @handle and release the handle itself.
 * If the owning FTE ends up with no children it is removed from HW and SW;
 * otherwise only the FW copy of the FTE is modified.
 */
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes, and increase its refcount, in order not to perform the
	 * "del" functions of the FTE. Will handle them here.
	 * The removal of the rules is done under locked FTE.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, it means the FTE should be deleted. First delete the
	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
	 * the FTE, which will handle the last decrease of the refcount, as
	 * well as required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (list_empty(&fte->node.children)) {
		fte->node.del_hw_func(&fte->node);
		/* Avoid double call to del_hw_fte */
		fte->node.del_hw_func = NULL;
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
	} else if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		up_write_ref_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);
0c56b975 2168
2cc43b49
MG
2169/* Assuming prio->node.children(flow tables) is sorted by level */
2170static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2171{
2172 struct fs_prio *prio;
2173
2174 fs_get_obj(prio, ft->node.parent);
2175
2176 if (!list_is_last(&ft->node.list, &prio->node.children))
2177 return list_next_entry(ft, node.list);
2178 return find_next_chained_ft(prio);
2179}
2180
/* If @ft is the namespace's root table, re-point the HW root to the next
 * table (or clear it when none remains), reprogramming every underlay QP.
 * NOTE(review): a failed update_root_ft command is only warned about and
 * root_ft is left unchanged; the function still returns 0 — confirm this is
 * intentional before relying on the return value.
 */
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
2223
f90edfd2
MG
/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	/* If @ft was the root table, move the root forward first. */
	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	/* Only the first table of a prio is linked from the previous prio;
	 * nothing to rewire otherwise.
	 */
	fs_get_obj(prio, ft->node.parent);
	if  (!(list_first_entry(&prio->node.children,
				struct mlx5_flow_table,
				node.list) == ft))
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
2255
/* Destroy a flow table: unlink it from the steering chain (unless unmanaged)
 * and drop the tree reference. Warns if other references keep it alive.
 */
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
0c56b975 2276
86d722ad 2277void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
0c56b975 2278{
476d61b7 2279 if (tree_remove_node(&fg->node, false))
0c56b975
MG
2280 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2281 fg->id);
2282}
8d2a9d8d 2283EXPORT_SYMBOL(mlx5_destroy_flow_group);
25302363 2284
328edb49
PB
2285struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2286 int n)
2287{
2288 struct mlx5_flow_steering *steering = dev->priv.steering;
2289
2290 if (!steering || !steering->fdb_sub_ns)
2291 return NULL;
2292
2293 return steering->fdb_sub_ns[n];
2294}
2295EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2296
4588fed7
MG
2297static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2298{
2299 switch (type) {
2300 case MLX5_FLOW_NAMESPACE_BYPASS:
15d187e2 2301 case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
4588fed7
MG
2302 case MLX5_FLOW_NAMESPACE_LAG:
2303 case MLX5_FLOW_NAMESPACE_OFFLOADS:
2304 case MLX5_FLOW_NAMESPACE_ETHTOOL:
2305 case MLX5_FLOW_NAMESPACE_KERNEL:
2306 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2307 case MLX5_FLOW_NAMESPACE_ANCHOR:
2308 return true;
2309 default:
2310 return false;
2311 }
2312}
2313
86d722ad
MG
/* Map a public namespace type to the first namespace of the matching prio
 * under the appropriate root. Root-level namespaces (FDB, port-sel, sniffer)
 * are returned directly; everything else resolves via (root_ns, prio).
 * Returns NULL when steering or the relevant root is not initialized.
 */
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		if (steering->port_sel_root_ns)
			return &steering->port_sel_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		root_ns = steering->fdb_root_ns;
		prio =  FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		root_ns = steering->egress_root_ns;
		/* EGRESS* types are laid out contiguously, offset from the base. */
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		root_ns = steering->rdma_tx_root_ns;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_COUNTERS_PRIO;
		break;
	default: /* Must be NIC RX */
		WARN_ON(!is_nic_rx_ns(type));
		root_ns = steering->root_ns;
		/* NIC RX prios are indexed directly by the namespace type value. */
		prio = type;
		break;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
25302363 2393
9b93ab98
GP
2394struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2395 enum mlx5_flow_namespace_type type,
2396 int vport)
2397{
2398 struct mlx5_flow_steering *steering = dev->priv.steering;
2399
57b92bdd 2400 if (!steering)
9b93ab98
GP
2401 return NULL;
2402
2403 switch (type) {
2404 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
57b92bdd
PP
2405 if (vport >= steering->esw_egress_acl_vports)
2406 return NULL;
9b93ab98
GP
2407 if (steering->esw_egress_root_ns &&
2408 steering->esw_egress_root_ns[vport])
2409 return &steering->esw_egress_root_ns[vport]->ns;
2410 else
2411 return NULL;
2412 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
57b92bdd
PP
2413 if (vport >= steering->esw_ingress_acl_vports)
2414 return NULL;
9b93ab98
GP
2415 if (steering->esw_ingress_root_ns &&
2416 steering->esw_ingress_root_ns[vport])
2417 return &steering->esw_ingress_root_ns[vport]->ns;
2418 else
2419 return NULL;
2420 default:
2421 return NULL;
2422 }
2423}
2424
328edb49
PB
2425static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2426 unsigned int prio,
2427 int num_levels,
2428 enum fs_node_type type)
25302363
MG
2429{
2430 struct fs_prio *fs_prio;
2431
2432 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2433 if (!fs_prio)
2434 return ERR_PTR(-ENOMEM);
2435
328edb49 2436 fs_prio->node.type = type;
139ed6c6 2437 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
25302363 2438 tree_add_node(&fs_prio->node, &ns->node);
a257b94a 2439 fs_prio->num_levels = num_levels;
25302363 2440 fs_prio->prio = prio;
25302363
MG
2441 list_add_tail(&fs_prio->node.list, &ns->node.children);
2442
2443 return fs_prio;
2444}
2445
328edb49
PB
2446static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2447 unsigned int prio,
2448 int num_levels)
2449{
2450 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2451}
2452
2453static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2454 unsigned int prio, int num_levels)
2455{
2456 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2457}
2458
25302363
MG
/* Minimal namespace initialization: only the node type is set here; tree
 * linkage is done by the callers.
 */
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}
2466
f66ad830
MZ
2467static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2468 int def_miss_act)
25302363
MG
2469{
2470 struct mlx5_flow_namespace *ns;
2471
2472 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2473 if (!ns)
2474 return ERR_PTR(-ENOMEM);
2475
2476 fs_init_namespace(ns);
f66ad830 2477 ns->def_miss_action = def_miss_act;
139ed6c6 2478 tree_init_node(&ns->node, NULL, del_sw_ns);
25302363
MG
2479 tree_add_node(&ns->node, &prio->node);
2480 list_add_tail(&ns->node.list, &prio->node.children);
2481
2482 return ns;
2483}
2484
13de6c10
MG
2485static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2486 struct init_tree_node *prio_metadata)
4cbdd30e
MG
2487{
2488 struct fs_prio *fs_prio;
2489 int i;
2490
2491 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
13de6c10 2492 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
4cbdd30e
MG
2493 if (IS_ERR(fs_prio))
2494 return PTR_ERR(fs_prio);
2495 }
2496 return 0;
2497}
2498
8d40d162
MG
/* Extract a single capability bit at bit-offset @offset from the current
 * MLX5_CAP_FLOW_TABLE capability words (big-endian layout).
 */
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)

/* True only when every capability bit listed in @caps is set on @dev. */
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}
2514
/* Recursively instantiate the static init_tree_node description under
 * @fs_parent_node, alternating prio and namespace levels. Prios whose
 * min_ft_level or capability requirements are not met by the device are
 * silently skipped. @prio is the prio index to assign at the current level.
 */
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		/* Leaf prios occupy a run of indices; advance past them. */
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}
2565
fba53f7b 2566static int init_root_tree(struct mlx5_flow_steering *steering,
8d40d162 2567 struct init_tree_node *init_node,
25302363
MG
2568 struct fs_node *fs_parent_node)
2569{
25302363 2570 int err;
61e9508f 2571 int i;
25302363 2572
25302363 2573 for (i = 0; i < init_node->ar_size; i++) {
fba53f7b 2574 err = init_root_tree_recursive(steering, &init_node->children[i],
61e9508f 2575 fs_parent_node,
25302363
MG
2576 init_node, i);
2577 if (err)
2578 return err;
2579 }
2580 return 0;
2581}
2582
6eb7a268
RD
/* SW-delete callback for a root namespace node: tear down its chain lock and
 * free the containing mlx5_flow_root_namespace (kfree(node) frees it because
 * ns is the first member — see create_root_ns()).
 */
static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}
2593
af76c501
MB
/* Allocate and initialize a root namespace for @table_type, wiring in the
 * default FW command set for that table type. Returns NULL on allocation
 * failure (callers translate to -ENOMEM).
 */
static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	/* Root node: no parent in the tree. */
	tree_add_node(&ns->node, NULL);

	return root_ns;
}
2621
655227ed
MG
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

/* Walk all prios of @ns, assigning each its start level and accumulating the
 * total number of levels consumed. Mutually recursive with
 * set_prio_attrs_in_prio() to cover nested namespaces.
 */
static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		 /* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

/* Set @prio's start level and recurse into its namespaces. */
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this a prio with chains, and we can jump from one chain
		 * (namespace) to another, so we accumulate the levels
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

/* Entry point: assign start levels across the whole root namespace. */
static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
2669
153fefbf
MG
/* The anchor is a single-entry table in the last (ANCHOR) NIC RX namespace;
 * it terminates the chained-table walk.
 */
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level  = ANCHOR_LEVEL;
	ft_attr.prio   = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}
2694
/* Create the NIC RX root namespace, populate it from the static root_fs
 * description, compute level offsets and create the anchor table. On any
 * failure the partially-built tree is cleaned up and steering->root_ns reset.
 */
static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}
2719
0da2d666 2720static void clean_tree(struct fs_node *node)
25302363 2721{
0da2d666
MG
2722 if (node) {
2723 struct fs_node *iter;
2724 struct fs_node *temp;
25302363 2725
800350a3 2726 tree_get_node(node);
0da2d666
MG
2727 list_for_each_entry_safe(iter, temp, &node->children, list)
2728 clean_tree(iter);
476d61b7
EB
2729 tree_put_node(node, false);
2730 tree_remove_node(node, false);
25302363 2731 }
153fefbf
MG
2732}
2733
0da2d666 2734static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
25302363 2735{
25302363
MG
2736 if (!root_ns)
2737 return;
2738
0da2d666 2739 clean_tree(&root_ns->ns.node);
25302363
MG
2740}
2741
87d22483
MG
2742static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2743{
2744 struct fs_prio *prio;
2745
2746 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2747 if (!steering->sniffer_tx_root_ns)
2748 return -ENOMEM;
2749
2750 /* Create single prio */
2751 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
905f6bd3 2752 return PTR_ERR_OR_ZERO(prio);
87d22483
MG
2753}
2754
2755static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2756{
2757 struct fs_prio *prio;
2758
2759 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2760 if (!steering->sniffer_rx_root_ns)
2761 return -ENOMEM;
2762
2763 /* Create single prio */
2764 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
905f6bd3 2765 return PTR_ERR_OR_ZERO(prio);
87d22483
MG
2766}
2767
425a563a
MG
/* Port-selection root: one prio with PORT_SEL_NUM_LEVELS table levels. */
#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
	if (!steering->port_sel_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
			      PORT_SEL_NUM_LEVELS);
	return PTR_ERR_OR_ZERO(prio);
}
2782
d83eb50e
MG
/* Build the RDMA RX root namespace from the static rdma_rx_root_fs tree and
 * compute its level layout; on failure the root is torn down and NULLed.
 */
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}
439e843f 2805
24670b1a
MG
/* Build the RDMA TX root namespace from the static rdma_tx_root_fs tree;
 * mirrors init_rdma_rx_root_ns().
 */
static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}
2828
439e843f
PB
2829/* FT and tc chains are stored in the same array so we can re-use the
2830 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2831 * When creating a new ns for each chain store it in the first available slot.
2832 * Assume tc chains are created and stored first and only then the FT chain.
2833 */
2834static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2835 struct mlx5_flow_namespace *ns)
2836{
2837 int chain = 0;
2838
2839 while (steering->fdb_sub_ns[chain])
2840 ++chain;
2841
2842 steering->fdb_sub_ns[chain] = ns;
2843}
2844
/* Create one chain namespace under @maj_prio with FDB_TC_MAX_PRIO minor
 * prios, and record it in the fdb_sub_ns array.
 * NOTE(review): namespaces/prios created before a failure are not unwound
 * here — presumably the caller tears down the whole FDB root on error;
 * confirm against the callers.
 */
static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}
2866
2867static int create_fdb_chains(struct mlx5_flow_steering *steering,
2868 int fs_prio,
2869 int chains)
2870{
2871 struct fs_prio *maj_prio;
328edb49
PB
2872 int levels;
2873 int chain;
328edb49 2874 int err;
25302363 2875
439e843f
PB
2876 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2877 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2878 fs_prio,
2879 levels);
2880 if (IS_ERR(maj_prio))
2881 return PTR_ERR(maj_prio);
2882
2883 for (chain = 0; chain < chains; chain++) {
2884 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2885 if (err)
2886 return err;
2887 }
2888
2889 return 0;
2890}
25302363 2891
439e843f
PB
2892static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2893{
439e843f
PB
2894 int err;
2895
975b992f 2896 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
439e843f
PB
2897 sizeof(*steering->fdb_sub_ns),
2898 GFP_KERNEL);
328edb49
PB
2899 if (!steering->fdb_sub_ns)
2900 return -ENOMEM;
2901
975b992f
PB
2902 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2903 if (err)
2904 return err;
2905
2906 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
439e843f
PB
2907 if (err)
2908 return err;
2909
2910 return 0;
2911}
2912
c7d5fa10
MG
2913static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2914{
2915 struct mlx5_flow_namespace *ns;
2916 struct fs_prio *prio;
2917 int i;
2918
2919 prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2920 if (IS_ERR(prio))
2921 return PTR_ERR(prio);
2922
2923 ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2924 if (IS_ERR(ns))
2925 return PTR_ERR(ns);
2926
2927 for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2928 prio = fs_create_prio(ns, i, 1);
2929 if (IS_ERR(prio))
2930 return PTR_ERR(prio);
2931 }
2932 return 0;
2933}
/* Release the FDB root namespace together with the per-chain
 * sub-namespace array allocated by create_fdb_fast_path().  Pointers
 * are cleared so the function is safe to reach from any error path.
 */
static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
}
/* Build the FDB (eswitch) steering root namespace.  The priorities are
 * created in a fixed order: bypass path, fast-path chains, then the
 * TC-miss, bridge-offload, slow-path and per-vport priorities.  Any
 * failure tears down everything created so far.
 */
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	err = create_fdb_bypass(steering);
	if (err)
		goto out_err;

	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* three levels reserved for the bridge offload tables */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* We put this priority last, knowing that nothing will get here
	 * unless explicitly forwarded to. This is possible because the
	 * slow path tables have catch all rules and nothing gets passed
	 * those tables.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_fdb_root_ns(steering);
	return err;
}
2996
9b93ab98 2997static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
2998{
2999 struct fs_prio *prio;
3000
9b93ab98
GP
3001 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
3002 if (!steering->esw_egress_root_ns[vport])
efdc810b
MHY
3003 return -ENOMEM;
3004
3005 /* create 1 prio*/
9b93ab98 3006 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
44fafdaa 3007 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
3008}
3009
9b93ab98 3010static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
efdc810b
MHY
3011{
3012 struct fs_prio *prio;
3013
9b93ab98
GP
3014 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
3015 if (!steering->esw_ingress_root_ns[vport])
efdc810b
MHY
3016 return -ENOMEM;
3017
3018 /* create 1 prio*/
9b93ab98 3019 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
44fafdaa 3020 return PTR_ERR_OR_ZERO(prio);
efdc810b
MHY
3021}
3022
57b92bdd 3023int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
9b93ab98
GP
3024{
3025 struct mlx5_flow_steering *steering = dev->priv.steering;
3026 int err;
3027 int i;
3028
2752b823
PP
3029 steering->esw_egress_root_ns =
3030 kcalloc(total_vports,
3031 sizeof(*steering->esw_egress_root_ns),
3032 GFP_KERNEL);
9b93ab98
GP
3033 if (!steering->esw_egress_root_ns)
3034 return -ENOMEM;
3035
2752b823 3036 for (i = 0; i < total_vports; i++) {
9b93ab98
GP
3037 err = init_egress_acl_root_ns(steering, i);
3038 if (err)
3039 goto cleanup_root_ns;
3040 }
57b92bdd 3041 steering->esw_egress_acl_vports = total_vports;
9b93ab98
GP
3042 return 0;
3043
3044cleanup_root_ns:
3045 for (i--; i >= 0; i--)
3046 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3047 kfree(steering->esw_egress_root_ns);
9414277a 3048 steering->esw_egress_root_ns = NULL;
9b93ab98
GP
3049 return err;
3050}
3051
57b92bdd
PP
3052void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3053{
3054 struct mlx5_flow_steering *steering = dev->priv.steering;
3055 int i;
3056
3057 if (!steering->esw_egress_root_ns)
3058 return;
3059
3060 for (i = 0; i < steering->esw_egress_acl_vports; i++)
3061 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3062
3063 kfree(steering->esw_egress_root_ns);
3064 steering->esw_egress_root_ns = NULL;
3065}
3066
3067int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
9b93ab98
GP
3068{
3069 struct mlx5_flow_steering *steering = dev->priv.steering;
3070 int err;
3071 int i;
3072
2752b823
PP
3073 steering->esw_ingress_root_ns =
3074 kcalloc(total_vports,
3075 sizeof(*steering->esw_ingress_root_ns),
3076 GFP_KERNEL);
9b93ab98
GP
3077 if (!steering->esw_ingress_root_ns)
3078 return -ENOMEM;
3079
2752b823 3080 for (i = 0; i < total_vports; i++) {
9b93ab98
GP
3081 err = init_ingress_acl_root_ns(steering, i);
3082 if (err)
3083 goto cleanup_root_ns;
3084 }
57b92bdd 3085 steering->esw_ingress_acl_vports = total_vports;
9b93ab98
GP
3086 return 0;
3087
3088cleanup_root_ns:
3089 for (i--; i >= 0; i--)
3090 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3091 kfree(steering->esw_ingress_root_ns);
9414277a 3092 steering->esw_ingress_root_ns = NULL;
9b93ab98
GP
3093 return err;
3094}
3095
57b92bdd
PP
3096void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3097{
3098 struct mlx5_flow_steering *steering = dev->priv.steering;
3099 int i;
3100
3101 if (!steering->esw_ingress_root_ns)
3102 return;
3103
3104 for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3105 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3106
3107 kfree(steering->esw_ingress_root_ns);
3108 steering->esw_ingress_root_ns = NULL;
3109}
3110
8348b71c
DC
3111u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3112{
3113 struct mlx5_flow_root_namespace *root;
3114 struct mlx5_flow_namespace *ns;
3115
3116 ns = mlx5_get_flow_namespace(dev, type);
3117 if (!ns)
3118 return 0;
3119
3120 root = find_root(&ns->node);
3121 if (!root)
3122 return 0;
3123
3124 return root->cmds->get_capabilities(root, root->table_type);
3125}
3126
5f418378
AY
3127static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3128{
8ce78257 3129 int err;
5f418378
AY
3130
3131 steering->egress_root_ns = create_root_ns(steering,
3132 FS_FT_NIC_TX);
3133 if (!steering->egress_root_ns)
3134 return -ENOMEM;
3135
8ce78257
MB
3136 err = init_root_tree(steering, &egress_root_fs,
3137 &steering->egress_root_ns->ns.node);
3138 if (err)
3139 goto cleanup;
3140 set_prio_attrs(steering->egress_root_ns);
3141 return 0;
3142cleanup:
3143 cleanup_root_ns(steering->egress_root_ns);
3144 steering->egress_root_ns = NULL;
3145 return err;
5f418378
AY
3146}
3147
db492c1e
JP
3148static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
3149 union devlink_param_value val,
3150 struct netlink_ext_ack *extack)
3151{
3152 struct mlx5_core_dev *dev = devlink_priv(devlink);
3153 char *value = val.vstr;
3154 int err = 0;
3155
3156 if (!strcmp(value, "dmfs")) {
3157 return 0;
3158 } else if (!strcmp(value, "smfs")) {
3159 u8 eswitch_mode;
3160 bool smfs_cap;
3161
3162 eswitch_mode = mlx5_eswitch_mode(dev);
3163 smfs_cap = mlx5_fs_dr_is_supported(dev);
3164
3165 if (!smfs_cap) {
3166 err = -EOPNOTSUPP;
3167 NL_SET_ERR_MSG_MOD(extack,
3168 "Software managed steering is not supported by current device");
3169 }
3170
3171 else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
3172 NL_SET_ERR_MSG_MOD(extack,
3173 "Software managed steering is not supported when eswitch offloads enabled.");
3174 err = -EOPNOTSUPP;
3175 }
3176 } else {
3177 NL_SET_ERR_MSG_MOD(extack,
3178 "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
3179 err = -EINVAL;
3180 }
3181
3182 return err;
3183}
3184
3185static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
3186 struct devlink_param_gset_ctx *ctx)
3187{
3188 struct mlx5_core_dev *dev = devlink_priv(devlink);
3189 enum mlx5_flow_steering_mode mode;
3190
3191 if (!strcmp(ctx->val.vstr, "smfs"))
3192 mode = MLX5_FLOW_STEERING_MODE_SMFS;
3193 else
3194 mode = MLX5_FLOW_STEERING_MODE_DMFS;
3195 dev->priv.steering->mode = mode;
3196
3197 return 0;
3198}
3199
3200static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
3201 struct devlink_param_gset_ctx *ctx)
3202{
3203 struct mlx5_core_dev *dev = devlink_priv(devlink);
3204
3205 if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
3206 strcpy(ctx->val.vstr, "smfs");
3207 else
3208 strcpy(ctx->val.vstr, "dmfs");
3209 return 0;
3210}

/* Driver-specific devlink parameters: runtime-settable string
 * "flow_steering_mode" ("dmfs"/"smfs"), wired to the get/set/validate
 * callbacks above.
 */
static const struct devlink_param mlx5_fs_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_fs_mode_get, mlx5_fs_mode_set,
			     mlx5_fs_mode_validate),
};

/* Tear down every root namespace mlx5_fs_core_init() may have created
 * and unregister the devlink parameters.  Roots are created
 * conditionally, so some pointers may be NULL here —
 * presumably cleanup_root_ns() tolerates NULL; verify in its
 * definition elsewhere in this file.
 */
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_fdb_root_ns(steering);
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);

	devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
			       ARRAY_SIZE(mlx5_fs_params));
}

/* Create all flow steering root namespaces supported by the device,
 * each gated on the relevant firmware capability bits, and register the
 * devlink "flow_steering_mode" parameter.  Any failure tears down the
 * partially built state via mlx5_fs_core_cleanup().
 */
int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;

	err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
				   ARRAY_SIZE(mlx5_fs_params));
	if (err)
		return err;

	/* NIC RX root: Ethernet port with a NIC flow table, or IB port
	 * with enhanced IPoIB offloads — and FT support in both cases.
	 */
	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	/* FDB (eswitch) steering only applies to the eswitch manager */
	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
		err = init_port_sel_root_ns(steering);
		if (err)
			goto err;
	}

	/* RDMA RX additionally requires table-miss action domain support */
	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
		err = init_rdma_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;

err:
	mlx5_fs_core_cleanup(dev);
	return err;
}
3308
3309void mlx5_fs_core_free(struct mlx5_core_dev *dev)
3310{
3311 struct mlx5_flow_steering *steering = dev->priv.steering;
3312
3313 kmem_cache_destroy(steering->ftes_cache);
3314 kmem_cache_destroy(steering->fgs_cache);
3315 kfree(steering);
3316 mlx5_ft_pool_destroy(dev);
3317 mlx5_cleanup_fc_stats(dev);
3318}

/* Allocate the software state for flow steering: flow-counter stats,
 * the flow-table pool, the steering context and the flow-group/FTE slab
 * caches.  Default mode is SMFS when the SW-steering (DR) backend is
 * supported, DMFS otherwise.
 */
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	/* NOTE(review): on the early failures dev->priv.steering has not
	 * been assigned yet — confirm mlx5_fs_core_free() tolerates a
	 * NULL steering pointer.
	 */
	mlx5_fs_core_free(dev);
	return err;
}

/* Add @underlay_qpn to the NIC RX root flow table and track it on the
 * root's underlay QPN list.  Serialized against root FT changes by
 * chain_lock.  Returns 0 on success or a negative errno; the tracking
 * entry is freed on any failure.
 */
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	/* allocate outside the lock */
	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	/* no root flow table to attach the QPN to */
	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

/* Remove @underlay_qpn from the root flow table and drop it from the
 * tracking list.  Returns -EINVAL when the QPN was never added; a
 * firmware update failure is only warned about — the entry is still
 * unlinked and freed, and 0 is returned.
 */
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
2b688ea5
MG
3442
3443static struct mlx5_flow_root_namespace
3444*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3445{
3446 struct mlx5_flow_namespace *ns;
3447
3448 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3449 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3450 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3451 else
3452 ns = mlx5_get_flow_namespace(dev, ns_type);
3453 if (!ns)
3454 return NULL;
3455
3456 return find_root(&ns->node);
3457}
3458
3459struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3460 u8 ns_type, u8 num_actions,
3461 void *modify_actions)
3462{
3463 struct mlx5_flow_root_namespace *root;
3464 struct mlx5_modify_hdr *modify_hdr;
3465 int err;
3466
3467 root = get_root_namespace(dev, ns_type);
3468 if (!root)
3469 return ERR_PTR(-EOPNOTSUPP);
3470
3471 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3472 if (!modify_hdr)
3473 return ERR_PTR(-ENOMEM);
3474
3475 modify_hdr->ns_type = ns_type;
3476 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3477 modify_actions, modify_hdr);
3478 if (err) {
3479 kfree(modify_hdr);
3480 return ERR_PTR(err);
3481 }
3482
3483 return modify_hdr;
3484}
3485EXPORT_SYMBOL(mlx5_modify_header_alloc);
3486
3487void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3488 struct mlx5_modify_hdr *modify_hdr)
3489{
3490 struct mlx5_flow_root_namespace *root;
3491
3492 root = get_root_namespace(dev, modify_hdr->ns_type);
3493 if (WARN_ON(!root))
3494 return;
3495 root->cmds->modify_header_dealloc(root, modify_hdr);
3496 kfree(modify_hdr);
3497}
3498EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3499
3500struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3f3f05ab 3501 struct mlx5_pkt_reformat_params *params,
2b688ea5
MG
3502 enum mlx5_flow_namespace_type ns_type)
3503{
3504 struct mlx5_pkt_reformat *pkt_reformat;
3505 struct mlx5_flow_root_namespace *root;
3506 int err;
3507
3508 root = get_root_namespace(dev, ns_type);
3509 if (!root)
3510 return ERR_PTR(-EOPNOTSUPP);
3511
3512 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3513 if (!pkt_reformat)
3514 return ERR_PTR(-ENOMEM);
3515
3516 pkt_reformat->ns_type = ns_type;
3f3f05ab
YK
3517 pkt_reformat->reformat_type = params->type;
3518 err = root->cmds->packet_reformat_alloc(root, params, ns_type,
2b688ea5
MG
3519 pkt_reformat);
3520 if (err) {
3521 kfree(pkt_reformat);
3522 return ERR_PTR(err);
3523 }
3524
3525 return pkt_reformat;
3526}
3527EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3528
3529void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3530 struct mlx5_pkt_reformat *pkt_reformat)
3531{
3532 struct mlx5_flow_root_namespace *root;
3533
3534 root = get_root_namespace(dev, pkt_reformat->ns_type);
3535 if (WARN_ON(!root))
3536 return;
3537 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3538 kfree(pkt_reformat);
3539}
3540EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

/* Return the backend-assigned id of a match definer created by
 * mlx5_create_match_definer().
 */
int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
	return definer->id;
}
3547struct mlx5_flow_definer *
3548mlx5_create_match_definer(struct mlx5_core_dev *dev,
3549 enum mlx5_flow_namespace_type ns_type, u16 format_id,
3550 u32 *match_mask)
3551{
3552 struct mlx5_flow_root_namespace *root;
3553 struct mlx5_flow_definer *definer;
3554 int id;
3555
3556 root = get_root_namespace(dev, ns_type);
3557 if (!root)
3558 return ERR_PTR(-EOPNOTSUPP);
3559
3560 definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3561 if (!definer)
3562 return ERR_PTR(-ENOMEM);
3563
3564 definer->ns_type = ns_type;
3565 id = root->cmds->create_match_definer(root, format_id, match_mask);
3566 if (id < 0) {
3567 mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3568 kfree(definer);
3569 return ERR_PTR(id);
3570 }
3571 definer->id = id;
3572 return definer;
3573}
3574
3575void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3576 struct mlx5_flow_definer *definer)
3577{
3578 struct mlx5_flow_root_namespace *root;
3579
3580 root = get_root_namespace(dev, definer->ns_type);
3581 if (WARN_ON(!root))
3582 return;
3583
3584 root->cmds->destroy_match_definer(root, definer->id);
3585 kfree(definer);
3586}
3587
6a48faee
MG
3588int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3589 struct mlx5_flow_root_namespace *peer_ns)
3590{
38b9d1c6
MG
3591 if (peer_ns && ns->mode != peer_ns->mode) {
3592 mlx5_core_err(ns->dev,
3593 "Can't peer namespace of different steering mode\n");
3594 return -EINVAL;
3595 }
3596
6a48faee
MG
3597 return ns->cmds->set_peer(ns, peer_ns);
3598}

/* Switch the FDB root namespace between the FW (DMFS) and SW (SMFS)
 * steering command backends.
 *
 * This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
		/* Can't set cmds to non root namespace */
		return -EINVAL;

	/* mode switching is only supported for the FDB root */
	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	/* Create the new backend's namespace before destroying the old
	 * one, so a failure leaves the namespace untouched.
	 */
	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}