net/mlx5: E-Switch, Correctly deal with inline mode on ConnectX-5
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

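/* Add a flow rule to the NIC (non-eswitch) TC flow table, creating the
 * auto-grouped table on first use. For forwarding actions the destination
 * is the vlan table; for count-only (drop) rules a flow counter is
 * attached as the destination instead.
 */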
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

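/* Delete a NIC flow rule and its counter; tear down the TC flow table
 * once the last offloaded filter is gone.
 */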
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

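/* Add a flow rule to the eswitch FDB offloads table, applying any vlan
 * push/pop action first.
 */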
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

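/* Delete an eswitch (FDB) flow rule, undoing the vlan action and
 * releasing the encap entry if the rule used one.
 */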
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);

	mlx5_eswitch_del_vlan_action(esw, flow->attr);

	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}

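/* Remove the flow from its encap entry's flow list; if it was the last
 * flow sharing the entry, free the HW encap context and the entry itself.
 */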
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when setting the rule in the FW failed, etc. In that
 * case the flow rule itself might not exist, but some offloading state
 * related to the actions still needs to be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

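/* Translate a flower vxlan tunnel match (UDP protocol plus tunnel key id)
 * into the device's misc parameters vxlan_vni match fields.
 */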
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

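/* Parse tunnel (decap) match keys into the outer headers of the flow
 * spec: the vxlan UDP dport (which must be an offloaded vxlan port), the
 * tunnel id, and the outer IPv4/IPv6 addresses.
 */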
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

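/* Translate the flower match (L2/L3/L4 and tunnel keys) into a mlx5 flow
 * spec, and report via *min_inline the minimal e-switch inline mode the
 * match requires.
 */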
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flows, the header pointers should point to the
		 * inner headers; the outer headers were already set by
		 * parse_tunnel_attr.
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

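/* Parse the match and, for VF rep flows, reject matches that need a
 * larger min inline level than the e-switch is configured for. An
 * inline_mode of MLX5_INLINE_MODE_NONE (HW that needs no inline headers,
 * e.g. ConnectX-5) always passes this check.
 */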
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

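/* Parse TC actions for a NIC flow: gact drop (with a counter when the
 * device supports flow counters) or skbedit mark, which becomes the flow
 * tag. Only a single action per rule is supported.
 */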
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

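/* Resolve the IPv4 route for the tunnel destination: pick the egress
 * netdev (or the uplink when the route leaves this e-switch), the TTL,
 * and the next-hop neighbour.
 */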
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

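/* Build the Ethernet/IPv4/UDP/vxlan encap header into buf and return its
 * size. Fields not set here (e.g. the UDP source port) remain zero from
 * the memset.
 */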
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

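/* Allocate a HW encap context for an IPv4 vxlan tunnel: resolve the route
 * and neighbour, snapshot the neighbour's MAC, build the encap header and
 * register it with the device via mlx5_encap_alloc().
 */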
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)

{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

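/* Find or create the encap entry for this tunnel key. Entries are kept in
 * a hash table on the e-switch and shared by all flows using the same
 * tunnel parameters.
 */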
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

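/* Parse TC actions for an e-switch (FDB) flow: drop, mirred redirect to a
 * rep on the same e-switch (possibly via tunnel encap), vlan push/pop,
 * and tunnel decap.
 */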
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

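/* Offload a flower classifier: parse the match and actions, add the rule
 * to the FDB (switchdev mode) or to the NIC TC table, and track the flow
 * in the per-priv hashtable keyed by the TC cookie.
 */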
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err, attr_size = 0;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

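/* Report cached HW counter values (bytes/packets/lastuse) back to the TC
 * actions of an offloaded flow.
 */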
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}