net/mlx5e: Add NIC attributes for offloaded TC flows
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c (linux-2.6-block.git)

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

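/* Per-flow offload state. A flow steered through the NIC RX tables
 * carries a mlx5_nic_flow_attr, an eswitch (FDB) flow carries a
 * mlx5_esw_flow_attr; the union at the tail of mlx5e_tc_flow is
 * allocated inline, sized for whichever path the flow takes.
 */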
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

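/* Add a flow to the NIC RX steering tables. The TC flow table is
 * created on demand (auto-grouped, at MLX5E_TC_PRIO priority) and torn
 * down again when the last filter goes away. FWD_DEST rules fall
 * through to the vlan table of the regular RX path; rules whose only
 * fate is COUNT (counted drops) point at a dedicated flow counter.
 */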
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_nic_flow_attr *attr)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

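/* Delete a NIC flow: remove the rule and its counter from HW, and
 * destroy the TC flow table once no offloaded filters remain.
 */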
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

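/* Add an eswitch (FDB) flow: apply any vlan push/pop action first,
 * then install the offloaded rule in the FDB on behalf of the reps.
 */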
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

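/* Teardown mirrors mlx5e_tc_add_fdb_flow, and additionally releases
 * the encap entry when the flow carried an ENCAP action.
 */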
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}

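/* Encap entries are shared; every flow using the same tunnel key is
 * linked on e->flows. When the last flow goes, the entry is removed
 * from the hash table and, if it made it to HW (e->n is set), the
 * encap_id is deallocated and the cached neighbour released.
 */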
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when setting the rule to the FW failed, etc. The
 * flow rule itself might not exist, but some offloading related to
 * the actions should still be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

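/* VXLAN decap match: pin the outer IP protocol to UDP (the caller has
 * already validated the dst port) and, when given, match the VNI via
 * the misc parameters of the flow spec.
 */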
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

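/* Build the outer-header match for tunnel decap. Only VXLAN on a
 * fully-masked, registered UDP dst port can be offloaded; both IPv4
 * and IPv6 underlays are handled, outer IP fragments are left to
 * software, and the DMAC is pinned to our own address.
 */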
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

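/* Translate a flower match into a mlx5 flow spec. Also reports the
 * minimal eswitch inline mode the match requires (L2 by default,
 * raised to IP or TCP_UDP when L3/L4 fields are matched), which
 * parse_cls_flower() checks against the configured mode.
 */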
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; the outer headers were already set by
		 * parse_tunnel_attr.
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

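/* Same as __parse_cls_flower(), plus rejecting eswitch flows added on
 * a VF representor when the configured inline mode is too low for the
 * requested match.
 */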
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

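/* Map TC actions onto NIC flow attributes. A single action per rule is
 * supported: either gact drop (with a flow counter when the device has
 * one) or skbedit mark with a tag that fits in 16 bits. For
 * illustration only (the device name is a placeholder), a filter like
 *
 *	tc filter add dev <netdev> parent ffff: protocol ip flower \
 *		ip_proto tcp action skbedit mark 0x1234
 *
 * would take the skbedit branch and program 0x1234 as the flow tag.
 */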
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5_nic_flow_attr *attr)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

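/* Encap entries are deduplicated on the full ip_tunnel_key: flows
 * whose tunnel_set actions resolve to an identical key share a single
 * entry, looked up by jhash in the eswitch encap table.
 */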
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

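/* Resolve the IPv4 route and neighbour for the tunnel destination.
 * If the egress device of the route isn't on the same HW eswitch as
 * this netdev, the uplink netdev is used instead.
 */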
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

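/* IPv6 counterpart of the lookup above, reading the error and hop
 * limit from the dst entry instead of the rtable.
 */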
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

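/* Write a complete Ethernet/IPv4/UDP/VXLAN encap header into buf and
 * return its size. The IP/UDP length and checksum fields are left
 * zeroed here; presumably the device completes them at transmit time,
 * as the IPv6 variant below notes explicitly for the payload length.
 */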
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

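/* Build and program an IPv4 encap header: route the tunnel
 * destination, take a DMAC snapshot from the resolved neighbour, then
 * allocate the header in HW via mlx5_encap_alloc(). A neighbour that
 * isn't valid yet makes the offload fail with -EOPNOTSUPP.
 */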
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

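/* IPv6 counterpart of mlx5e_create_encap_header_ipv4() */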
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

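/* Find or create the encap entry for a tunnel_set action: validate
 * the vxlan udp dst port, reuse an existing entry with an identical
 * tunnel key if there is one, otherwise build a new encap header and
 * publish it in the eswitch encap table.
 */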
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

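/* Map TC actions onto eswitch flow attributes: gact drop, mirred
 * redirect to a port on the same eswitch, vlan push/pop and tunnel_key
 * set/unset (encap/decap). As an illustrative sketch (interface names
 * are placeholders), an encap rule would look roughly like
 *
 *	tc filter add dev <vf_rep> parent ffff: protocol ip flower \
 *		action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 \
 *			id 100 dst_port 4789 \
 *		action mirred egress redirect dev <vxlan_dev>
 *
 * where the tunnel_key action records the tunnel info and the mirred
 * to a device on another switch triggers mlx5e_attach_encap().
 */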
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

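/* Entry point for adding a flower filter: the flow and its attribute
 * union are sized for the target path (FDB when the eswitch runs in
 * offloads mode, NIC otherwise), match and actions are parsed, the
 * rule is installed, and the flow is indexed by its TC cookie.
 */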
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->esw_attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, flow->nic_attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, flow->nic_attr);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

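/* Report HW stats for a flower filter: read the cached counter values
 * and fold them into each TC action, so `tc -s filter show` reflects
 * traffic handled entirely in HW.
 */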
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

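/* Offloaded flows are tracked in an rhashtable keyed by the TC filter
 * cookie; cleanup walks whatever is left and tears it down.
 */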
e8f887ac
AV
1293static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
1294 .head_offset = offsetof(struct mlx5e_tc_flow, node),
1295 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
1296 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
1297 .automatic_shrinking = true,
1298};
1299
1300int mlx5e_tc_init(struct mlx5e_priv *priv)
1301{
acff797c 1302 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac
AV
1303
1304 tc->ht_params = mlx5e_tc_flow_ht_params;
1305 return rhashtable_init(&tc->ht, &tc->ht_params);
1306}
1307
1308static void _mlx5e_tc_del_flow(void *ptr, void *arg)
1309{
1310 struct mlx5e_tc_flow *flow = ptr;
1311 struct mlx5e_priv *priv = arg;
1312
961e8979 1313 mlx5e_tc_del_flow(priv, flow);
e8f887ac
AV
1314 kfree(flow);
1315}
1316
1317void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1318{
acff797c 1319 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac
AV
1320
1321 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1322
acff797c
MG
1323 if (!IS_ERR_OR_NULL(tc->t)) {
1324 mlx5_destroy_flow_table(tc->t);
1325 tc->t = NULL;
e8f887ac
AV
1326 }
1327}