net/mlx5e: Correct cleanup order when deleting offloaded TC rules
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

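/* An offloaded TC flower rule: hashed by the TC cookie, holding the
 * installed hardware rule handle and, for e-switch (FDB) flows, the
 * extra forwarding attributes allocated right behind the struct.
 */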
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

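/* Add a rule to the NIC RX flow table, creating the table on first use.
 * A drop action gets a dedicated counter as its destination; a forward
 * action points at the vlan table.  On failure the table is destroyed
 * again if it was created here, and the counter is released.
 */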
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

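/* Add a rule to the e-switch FDB: apply any vlan push/pop action first,
 * then install the offloaded rule itself.
 */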
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

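/* Tear down in the reverse order of creation: look up the counter while
 * the rule handle is still valid, delete the rule, undo the e-switch
 * vlan action, and only then free the counter.  The NIC flow table is
 * dropped once the last filter is gone.
 */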
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_handle *rule,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	mlx5_del_flow_rules(rule);

	if (esw && esw->mode == SRIOV_OFFLOADS)
		mlx5_eswitch_del_vlan_action(esw, attr);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

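/* Match VXLAN decap: force UDP as the outer IP protocol and, if the
 * filter gives a tunnel key id, match it against the VXLAN VNI.
 */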
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

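/* Build the outer-header match for a tunnel (decap) flow.  A fully
 * masked UDP destination port that resolves to a known VXLAN port is
 * required; IP fragments are left to software.
 */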
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

	} else { /* udp dst port must be given */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

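/* Translate a flower match into an mlx5 flow spec and report the
 * minimal packet inline mode (L2/IP/TCP_UDP) the match requires.  For
 * decap flows the tunnel fields go into the outer headers and the rest
 * of the match is applied to the inner headers.
 */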
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flows, the header pointers should point to the
		 * inner headers; the outer headers were already set by
		 * parse_tunnel_attr.
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

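/* Parse the match and, for VF reps in switchdev mode, refuse flows
 * whose required inline mode exceeds what the e-switch is configured
 * for.
 */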
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && esw->mode == SRIOV_OFFLOADS &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

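/* NIC rules support a single action: gact drop (with a counter when the
 * device has one) or skbedit mark, where the mark becomes the flow tag
 * and must fit in 16 bits.
 */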
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}

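/* Resolve the route and neighbour for the tunnel destination, and make
 * sure the egress netdevice shares the HW e-switch with this one.
 */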
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   __be32 *saddr,
				   int *out_ttl)
{
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;

#if IS_ENABLED(CONFIG_INET)
	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt)) {
		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
		return -EOPNOTSUPP;
	}
#else
	return -EOPNOTSUPP;
#endif

	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
			__func__);
		ip_rt_put(rt);
		return -EOPNOTSUPP;
	}

	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	/* read the egress device before dropping the route reference */
	*out_dev = rt->dst.dev;
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;

	return 0;
}

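/* Write an Ethernet/IPv4/UDP/VXLAN encap header template into buf and
 * return its size; fields not set here are left zeroed.
 */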
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

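/* Resolve the tunnel route and neighbour and, if the neighbour is
 * valid, build the encap header and allocate an encap id for it with
 * the device.
 */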
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct flowi4 fl4 = {};
	struct neighbour *n;
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		err = -ENOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	kfree(encap_header);
	return err;
}

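/* Find or create the encap entry for this tunnel key.  Entries are kept
 * in a hash table on the e-switch so flows to the same tunnel
 * destination share one encap id.
 */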
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be given */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		return -EOPNOTSUPP;

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

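/* Parse actions for an e-switch rule: drop, redirect to a rep on the
 * same HW switch, redirect through a tunnel encap, vlan push/pop and
 * tunnel decap are supported.
 */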
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

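/* Offload a new flower classifier: parse the match and actions into an
 * FDB rule (switchdev mode) or a NIC rule, then track the flow by its
 * cookie in the hash table.
 */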
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	if (fdb_flow)
		flow = kzalloc(sizeof(*flow) +
			       sizeof(struct mlx5_esw_flow_attr),
			       GFP_KERNEL);
	else
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

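/* Drop this flow from its encap entry; the last flow releases the encap
 * id and the neighbour, and frees the entry.
 */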
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

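/* Remove an offloaded flow: unhash it, delete the HW rule and its
 * resources, and detach the encap entry if one was attached.
 */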
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

	/* attr is only set for e-switch (FDB) flows; NIC flows have none */
	if (flow->attr && (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		mlx5e_detach_encap(priv, flow);

	kfree(flow);

	return 0;
}

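/* Report cached HW counter values for the flow back to the TC actions. */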
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}