// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */

#include "mlx5_core.h"
#include "eswitch.h"
#include "helper.h"
#include "ofld.h"

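/* Prio tag mode applies only to VF vports, and only when the device
 * sets the prio_tag_required capability.
 */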
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
				 const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

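/* Install the single prio tag FTE for the vport's ingress ACL: match
 * untagged packets, push a zero-VID 802.1Q (prio tag) VLAN header and,
 * if a metadata rewrite rule exists, update the metadata register too.
 */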
static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 * required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
							&flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] failed to configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

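/* Program the vport's source port metadata into reg_c_0 on ingress.
 * mlx5_eswitch_get_vport_metadata_for_match() returns the value in
 * match position, so it is shifted back down here; the set-action
 * re-applies the placement through its explicit offset/length fields.
 */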
static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
	vport->ingress.offloads.modify_metadata_rule =
		mlx5_add_flow_rules(vport->ingress.acl,
				    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add metadata set rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
						 struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.modify_metadata_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
	mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
	vport->ingress.offloads.modify_metadata_rule = NULL;
}

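/* Install a catch-all drop FTE in the uplink drop group, blocking all
 * ingress traffic on the vport. The group exists only on the uplink
 * vport, for use when LAG is enabled.
 */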
static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	int err = 0;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_act.fg = vport->ingress.offloads.drop_grp;
	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}

	vport->ingress.offloads.drop_rule = flow_rule;
out:
	return err;
}

static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->ingress.offloads.drop_rule)
		return;

	mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
	vport->ingress.offloads.drop_rule = NULL;
}

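/* Create the ingress ACL rules the current configuration calls for:
 * the metadata rewrite rule when match metadata is enabled, then the
 * prio tag rule when prio tag mode applies. The uplink drop rule is
 * not created here; it is added on demand through
 * mlx5_esw_acl_ingress_vport_drop_rule_create().
 */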
static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	int err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_acl_ingress_mod_metadata_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) failed to create ingress modify metadata, err(%d)\n",
				 vport->vport, err);
			return err;
		}
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_acl_ingress_prio_tag_create(esw, vport);
		if (err) {
			esw_warn(esw->dev,
				 "vport(%d) failed to create ingress prio tag rule, err(%d)\n",
				 vport->vport, err);
			goto prio_tag_err;
		}
	}

	return 0;

prio_tag_err:
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	return err;
}

static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	esw_acl_ingress_allow_rule_destroy(vport);
	esw_acl_ingress_mod_metadata_destroy(esw, vport);
	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}

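/* Create up to three single-FTE flow groups, in this order:
 * 1) uplink only: a drop group for the LAG drop rule,
 * 2) prio tag only: a group matching on cvlan_tag for the untagged rule,
 * 3) metadata only: a match-all group for the metadata rewrite rule.
 */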
static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (vport->vport == MLX5_VPORT_UPLINK) {
		/* This group can hold an FTE to drop all traffic.
		 * Needed in case LAG is enabled.
		 */
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] failed to create ingress drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto drop_err;
		}
		vport->ingress.offloads.drop_grp = g;
		flow_index++;
	}

	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
		/* This group holds the FTE that matches untagged packets
		 * when prio_tag is enabled.
		 */
		memset(flow_group_in, 0, inlen);
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] failed to create ingress untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no match to add metadata for
		 * tagged packets if prio-tag is enabled, or for all traffic
		 * in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] failed to create ingress metadata flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
drop_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}

	if (vport->ingress.offloads.drop_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
		vport->ingress.offloads.drop_grp = NULL;
	}
}

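/* Set up the vport's ingress ACL: size the table by the number of FTEs
 * the configuration needs (metadata rewrite, uplink drop, prio tag),
 * then create the flow groups and the rules. A no-op when neither
 * metadata matching nor prio tag mode is in use.
 */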
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_acl_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_acl_ingress_allow_rule_destroy(vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (vport->vport == MLX5_VPORT_UPLINK)
		num_ftes++;
	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	vport->ingress.acl = esw_acl_table_create(esw, vport,
						  MLX5_FLOW_NAMESPACE_ESW_INGRESS,
						  num_ftes);
	if (IS_ERR(vport->ingress.acl)) {
		err = PTR_ERR(vport->ingress.acl);
		vport->ingress.acl = NULL;
		return err;
	}

	err = esw_acl_ingress_ofld_groups_create(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto rules_err;

	return 0;

rules_err:
	esw_acl_ingress_ofld_groups_destroy(vport);
group_err:
	esw_acl_ingress_table_destroy(vport);
	return err;
}

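/* Tear down everything esw_acl_ingress_ofld_setup() created: the rules,
 * then the flow groups, then the ACL table itself.
 */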
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	esw_acl_ingress_ofld_rules_destroy(esw, vport);
	esw_acl_ingress_ofld_groups_destroy(vport);
	esw_acl_ingress_table_destroy(vport);
}
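/* Replace the source port metadata for @vport_num and recreate the
 * ingress ACL rules with the new value; a zero @metadata restores the
 * vport's default metadata, as does a failure to recreate the rules.
 * Caller must hold rtnl_lock.
 */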
int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
					       u32 metadata)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int err;

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	esw_acl_ingress_ofld_rules_destroy(esw, vport);

	vport->metadata = metadata ? metadata : vport->default_metadata;

	/* Recreate ingress acl rules with vport->metadata */
	err = esw_acl_ingress_ofld_rules_create(esw, vport);
	if (err)
		goto out;

	return 0;

out:
	vport->metadata = vport->default_metadata;
	return err;
}
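/* Block all ingress traffic on @vport_num by installing the catch-all
 * drop rule; only meaningful for the uplink vport, where the drop
 * group is created (see esw_acl_ingress_ofld_groups_create()).
 */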
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport)) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return PTR_ERR(vport);
	}

	return esw_acl_ingress_src_port_drop_create(esw, vport);
}

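/* Remove the drop rule installed by
 * mlx5_esw_acl_ingress_vport_drop_rule_create(), if one exists.
 */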
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport))) {
		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
		return;
	}

	esw_acl_ingress_src_port_drop_destroy(esw, vport);
}