drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
};

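/* Per-offloaded-filter driver state, keyed in the driver rhashtable by the
 * TC cookie. For eswitch (FDB) flows, a struct mlx5_esw_flow_attr is
 * allocated right behind this struct and pointed to by @attr.
 */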
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

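/* Install a flow in the NIC RX steering tables. The TC flow table is
 * created lazily on first use as an auto-grouped table
 * (MLX5E_TC_TABLE_NUM_ENTRIES entries in MLX5E_TC_TABLE_NUM_GROUPS groups)
 * and torn down again in mlx5e_tc_del_flow() once no filters remain.
 */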
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

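/* Install a flow in the eswitch FDB offload tables. Vlan push/pop actions
 * are applied on the eswitch first; if that fails, no rule is added.
 */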
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

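/* Drop one flow's reference on its shared encap entry. When the entry's
 * flow list becomes empty, the FW encap object and the cached neighbour
 * are released and the entry is unhashed and freed.
 */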
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when installing the rule in the FW failed; the flow rule
 * itself might then not exist, but offload state attached to the actions
 * (vlan, encap) still has to be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

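/* Match on the VXLAN parts of the packet: UDP as the outer ip_protocol and,
 * if the filter gives a tunnel key id, the VNI in the misc parameters.
 */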
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

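/* Parse the tunnel (decap) side of a flower match into the outer headers of
 * the spec. Only VXLAN on an offloaded UDP dport is accepted, and the dst
 * port mask must be full; both IPv4 and IPv6 underlays are handled.
 */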
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

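/* Translate the flower dissector keys into an mlx5 match spec. *min_inline
 * is set to the smallest vport inline mode the matches require: L2 by
 * default, IP once L3 fields (addresses, ip_proto, frag=yes) are matched,
 * TCP_UDP once L4 ports are matched.
 */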
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, the outer headers were already set by
		 * parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

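/* Wrap __parse_cls_flower() and, for eswitch flows on non-uplink vports,
 * refuse matches that need a larger vport inline mode than the one
 * currently configured on the eswitch.
 */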
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

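/* Parse TC actions for the NIC RX path. A single action is supported per
 * rule: either gact drop (counted when the device has flow counters) or
 * skbedit mark, whose 16-bit value becomes the flow tag.
 */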
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

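/* Encap entries are shared: flows whose ip_tunnel_key compares equal hash
 * onto the same mlx5_encap_entry in esw->offloads.encap_tbl.
 */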
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

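/* Resolve the IPv4 route and neighbour towards an encap destination. If the
 * egress device sits on a different HW eswitch than this netdev, the uplink
 * representor is returned as the output device instead.
 * mlx5e_route_lookup_ipv6() below is the IPv6 counterpart.
 */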
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

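/* Write the static VXLAN encap header (Ethernet + IPv4 + UDP + VXLAN) into
 * @buf and return its size. Fields left zero by the memset (IP tot_len and
 * checksum, UDP source port and length) are presumably completed by the HW
 * at transmit time, as the IPv6 variant's comment notes for the payload len.
 */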
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

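/* Build and register an IPv4 VXLAN encap header with the FW: resolve the
 * route and neighbour, require a valid neighbour entry, snapshot its MAC
 * into e->h_dest and hand the generated header to mlx5_encap_alloc().
 */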
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

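/* Attach a flow to an encap entry for its tunnel destination, reusing an
 * existing entry when the tunnel key matches, otherwise creating one (IPv4
 * or IPv6, VXLAN only). The UDP dst port must be set and the src port left
 * unset.
 */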
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

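/* Parse TC actions for the eswitch (FDB) path: drop, mirred redirect to a
 * port on the same eswitch or, via encap, to another device, vlan push/pop,
 * and tunnel set/release. Since the list is walked in order, a tunnel_set
 * action has to precede the mirred redirect for the tunnel info to be seen.
 */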
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

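/* Entry point for adding a flower classifier offload, e.g. (illustrative
 * command, device name hypothetical):
 *
 *	tc filter add dev eth0_rep protocol ip parent ffff: \
 *		flower skip_sw ip_proto tcp action drop
 *
 * The match is parsed into a flow spec, the actions are parsed for either
 * the NIC or the eswitch path, the rule is installed in HW and the flow is
 * tracked by its TC cookie.
 */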
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err, attr_size = 0;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

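/* Feed the cached HW counter values (bytes, packets, last use) back into
 * the TC actions, e.g. for 'tc -s filter show'. preempt_disable() is taken
 * since tcf_action_stats_update() updates per-CPU action stats.
 */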
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

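/* Flows are hashed by the u64 TC cookie that uniquely identifies a flower
 * rule, so the add/del/stats callbacks find driver state in one lookup.
 */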
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}