net/mlx5e: Remove limitation of single NIC offloaded TC action per rule
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the above
 * conditions are met.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

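/* Install a parsed flower rule into the NIC RX TC table: set up the
 * forward/count destination, allocate a modify-header context when
 * pedit actions were parsed, and create the TC flow table on first use.
 */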
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

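/* Undo mlx5e_tc_add_nic_flow: delete the rule and its counter, drop the
 * TC flow table once the last filter is removed, and release the
 * modify-header context if one was allocated.
 */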
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

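/* Install a parsed flower rule into the eswitch FDB: vlan action,
 * optional modify-header context, then the offloaded rule; unwound in
 * reverse order on failure.
 */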
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

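/* Undo mlx5e_tc_add_fdb_flow; for encap flows this also detaches the
 * encap entry and frees the parse attributes kept for re-offloading.
 */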
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
	}

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(flow->esw_attr->parse_attr);
	}

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}

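/* Called when a tunnel neighbour became valid: allocate the cached
 * encapsulation header in HW and re-offload every flow sharing it.
 */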
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

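/* Called when a tunnel neighbour became invalid: remove the offloaded
 * rules sharing this encap entry and release the HW encap object.
 */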
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}

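/* Check the cached flow counters of all offloaded flows behind a neigh
 * entry; if any was used since the last report, notify the stack via
 * neigh_event_send() that the neighbour is still in use.
 */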
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour was already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

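/* Unlink a flow from its encap entry; the last flow going away also
 * releases the entry and its HW encap object.
 */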
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

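/* Match on UDP as the outer transport and, when given, on the VXLAN VNI. */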
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

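/* Build the outer-header match of a decap rule: tunnel UDP ports, outer
 * IPv4/IPv6 addresses, an enforced DMAC and no IP fragments.
 */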
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

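/* Translate the flower dissector keys into an mlx5 flow spec, recording
 * along the way the minimal inline mode the match requires.
 */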
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		if (mask->tos)
			*min_inline = MLX5_INLINE_MODE_IP;

		if (mask->ttl) /* currently not supported */
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

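/* pedit offload: SW pedit keys are accumulated per header type into the
 * shadow headers below and later translated into HW modify-header
 * actions by offload_pedit_fields().
 */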
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

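/* Record one pedit key in the shadow header of its type; acting twice
 * on the same bits is rejected.
 */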
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can
 * be parsed at most from the SW pedit action. On success, it holds how
 * many HW actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, first_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		first_z = find_first_zero_bit(&mask, field_bsize);
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1) || first_z < last) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

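/* Size the modify-header action buffer by the FW capability, bounded by
 * the worst case of 16 HW actions per SW pedit key.
 */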
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* each 32 bit pedit SW key can yield at most 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

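/* Gather all pedit keys of the action into masks/vals, translate them
 * to HW actions, and reject the offload if any mask bits were left
 * un-consumed (i.e. touch fields we cannot re-write).
 */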
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

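/* csum is offloaded only as a by-product of header re-write (pedit),
 * and only for the IPv4/TCP/UDP update flags.
 */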
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

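/* Parse the TC actions of a NIC rule - drop, pedit, csum and skbedit
 * mark - into the nic_attr action flags and flow tag.
 */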
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

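/* Resolve the route and neighbour towards the tunnel destination; if
 * the egress device is not on our eswitch, the uplink is used instead.
 */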
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

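/* Build the raw ETH/IP/UDP/VXLAN encapsulation header that is handed to
 * FW through mlx5_encap_alloc().
 */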
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

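/* Resolve the tunnel route and neighbour, attach the encap entry to the
 * representor neigh hash, and if the neighbour is valid allocate the
 * encap header in HW; otherwise trigger resolution and return -EAGAIN.
 */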
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state. That way, if we get a notification when the
	 * neigh changes its validity state, we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state. That way, if we get a notification when the
	 * neigh changes its validity state, we will find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

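/* Attach a flow to the encap entry keyed by its tunnel info, creating
 * the entry and its HW encap header on first use.
 */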
a54e20b4
HHZ
1608static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1609 struct ip_tunnel_info *tun_info,
1610 struct net_device *mirred_dev,
45247bf2
OG
1611 struct net_device **encap_dev,
1612 struct mlx5e_tc_flow *flow)
a54e20b4
HHZ
1613{
1614 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1ad9a00a 1615 struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
a54e20b4 1616 unsigned short family = ip_tunnel_info_af(tun_info);
45247bf2
OG
1617 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
1618 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
a54e20b4 1619 struct ip_tunnel_key *key = &tun_info->key;
c1ae1152 1620 struct mlx5e_encap_entry *e;
45247bf2 1621 int tunnel_type, err = 0;
a54e20b4
HHZ
1622 uintptr_t hash_key;
1623 bool found = false;
a54e20b4 1624
2fcd82e9 1625 /* udp dst port must be set */
a54e20b4 1626 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
2fcd82e9 1627 goto vxlan_encap_offload_err;
a54e20b4 1628
cd377663 1629 /* setting udp src port isn't supported */
2fcd82e9
OG
1630 if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
1631vxlan_encap_offload_err:
1632 netdev_warn(priv->netdev,
1633 "must set udp dst port and not set udp src port\n");
cd377663 1634 return -EOPNOTSUPP;
2fcd82e9 1635 }
cd377663 1636
1ad9a00a 1637 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
a54e20b4 1638 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
a54e20b4
HHZ
1639 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
1640 } else {
2fcd82e9
OG
1641 netdev_warn(priv->netdev,
1642 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
a54e20b4
HHZ
1643 return -EOPNOTSUPP;
1644 }
1645
76f7444d 1646 hash_key = hash_encap_info(key);
a54e20b4
HHZ
1647
1648 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
1649 encap_hlist, hash_key) {
76f7444d 1650 if (!cmp_encap_info(&e->tun_info.key, key)) {
a54e20b4
HHZ
1651 found = true;
1652 break;
1653 }
1654 }
1655
45247bf2
OG
1656 if (found)
1657 goto attach_flow;
a54e20b4
HHZ
1658
1659 e = kzalloc(sizeof(*e), GFP_KERNEL);
1660 if (!e)
1661 return -ENOMEM;
1662
76f7444d 1663 e->tun_info = *tun_info;
a54e20b4
HHZ
1664 e->tunnel_type = tunnel_type;
1665 INIT_LIST_HEAD(&e->flows);
1666
ce99f6b9 1667 if (family == AF_INET)
1a8552bd 1668 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
ce99f6b9 1669 else if (family == AF_INET6)
1a8552bd 1670 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
ce99f6b9 1671
232c0013 1672 if (err && err != -EAGAIN)
a54e20b4
HHZ
1673 goto out_err;
1674
a54e20b4
HHZ
1675 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
1676
45247bf2
OG
1677attach_flow:
1678 list_add(&flow->encap, &e->flows);
1679 *encap_dev = e->out_dev;
232c0013
HHZ
1680 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1681 attr->encap_id = e->encap_id;
45247bf2 1682
232c0013 1683 return err;
a54e20b4
HHZ
1684
1685out_err:
1686 kfree(e);
1687 return err;
1688}
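
mlx5e_attach_encap() shares one encapsulation header among all flows whose tunnel keys match: the key is hashed, the bucket is scanned with hash_for_each_possible_rcu(), and only on a miss is a new entry allocated and the header built. Below is a hedged find-or-create sketch of that scheme using simplified stand-in types; the hash and structs are hypothetical, not the driver's:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct tun_key { uint64_t tun_id; uint32_t dst_ip; uint16_t dst_port; };

struct encap_entry {
	struct tun_key key;
	int refcnt;			/* flows sharing this header */
	struct encap_entry *next;	/* hash-bucket chain */
};

#define NBUCKETS 64
static struct encap_entry *tbl[NBUCKETS];

static unsigned int hash_key(const struct tun_key *k)
{
	/* toy hash; the driver hashes the ip_tunnel_key instead */
	return ((unsigned int)k->tun_id ^ k->dst_ip ^ k->dst_port) % NBUCKETS;
}

/* Find an existing entry for @k or create one; mirrors the
 * found/attach_flow split in mlx5e_attach_encap(). */
static struct encap_entry *encap_get(const struct tun_key *k)
{
	unsigned int b = hash_key(k);
	struct encap_entry *e;

	for (e = tbl[b]; e; e = e->next)
		if (!memcmp(&e->key, k, sizeof(*k)))
			goto attach;

	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	memcpy(&e->key, k, sizeof(*k));
	e->next = tbl[b];
	tbl[b] = e;
attach:
	e->refcnt++;
	return e;
}

int main(void)
{
	struct tun_key k;

	memset(&k, 0, sizeof(k));	/* zero padding so memcmp is safe */
	k.tun_id = 42;
	k.dst_ip = 0x0a000001;
	k.dst_port = 4789;

	return (encap_get(&k) == encap_get(&k)) ? 0 : 1;
}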
1689
03a9d11e 1690static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
d7e75a32 1691 struct mlx5e_tc_flow_parse_attr *parse_attr,
a54e20b4 1692 struct mlx5e_tc_flow *flow)
03a9d11e 1693{
ecf5bb79 1694 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1d447a39 1695 struct mlx5e_rep_priv *rpriv = priv->ppriv;
a54e20b4 1696 struct ip_tunnel_info *info = NULL;
03a9d11e 1697 const struct tc_action *a;
22dc13c8 1698 LIST_HEAD(actions);
a54e20b4 1699 bool encap = false;
232c0013 1700 int err = 0;
03a9d11e
OG
1701
1702 if (tc_no_actions(exts))
1703 return -EINVAL;
1704
776b12b6 1705 memset(attr, 0, sizeof(*attr));
1d447a39 1706 attr->in_rep = rpriv->rep;
03a9d11e 1707
22dc13c8
WC
1708 tcf_exts_to_list(exts, &actions);
1709 list_for_each_entry(a, &actions, list) {
03a9d11e 1710 if (is_tcf_gact_shot(a)) {
8b32580d
OG
1711 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1712 MLX5_FLOW_CONTEXT_ACTION_COUNT;
03a9d11e
OG
1713 continue;
1714 }
1715
d7e75a32
OG
1716 if (is_tcf_pedit(a)) {
1717 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
1718 parse_attr);
1719 if (err)
1720 return err;
1721
1722 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1723 continue;
1724 }
1725
26c02749
OG
1726 if (is_tcf_csum(a)) {
1727 if (csum_offload_supported(priv, attr->action,
1728 tcf_csum_update_flags(a)))
1729 continue;
1730
1731 return -EOPNOTSUPP;
1732 }
1733
5724b8b5 1734 if (is_tcf_mirred_egress_redirect(a)) {
03a9d11e 1735 int ifindex = tcf_mirred_ifindex(a);
45247bf2 1736 struct net_device *out_dev, *encap_dev = NULL;
03a9d11e 1737 struct mlx5e_priv *out_priv;
03a9d11e
OG
1738
1739 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
1740
a54e20b4
HHZ
1741 if (switchdev_port_same_parent_id(priv->netdev,
1742 out_dev)) {
1743 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1744 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1745 out_priv = netdev_priv(out_dev);
1d447a39
SM
1746 rpriv = out_priv->ppriv;
1747 attr->out_rep = rpriv->rep;
a54e20b4
HHZ
1748 } else if (encap) {
1749 err = mlx5e_attach_encap(priv, info,
45247bf2 1750 out_dev, &encap_dev, flow);
232c0013 1751 if (err && err != -EAGAIN)
a54e20b4 1752 return err;
a54e20b4
HHZ
1753 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1754 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1755 MLX5_FLOW_CONTEXT_ACTION_COUNT;
45247bf2 1756 out_priv = netdev_priv(encap_dev);
1d447a39
SM
1757 rpriv = out_priv->ppriv;
1758 attr->out_rep = rpriv->rep;
232c0013 1759 attr->parse_attr = parse_attr;
a54e20b4 1760 } else {
03a9d11e
OG
1761 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1762 priv->netdev->name, out_dev->name);
1763 return -EINVAL;
1764 }
a54e20b4
HHZ
1765 continue;
1766 }
03a9d11e 1767
a54e20b4
HHZ
1768 if (is_tcf_tunnel_set(a)) {
1769 info = tcf_tunnel_info(a);
1770 if (info)
1771 encap = true;
1772 else
1773 return -EOPNOTSUPP;
03a9d11e
OG
1774 continue;
1775 }
1776
8b32580d 1777 if (is_tcf_vlan(a)) {
09c91ddf 1778 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
8b32580d 1779 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
09c91ddf 1780 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
8b32580d
OG
1781 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
1782 return -EOPNOTSUPP;
1783
1784 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1785 attr->vlan = tcf_vlan_push_vid(a);
09c91ddf
OG
1786 } else { /* action is TCA_VLAN_ACT_MODIFY */
1787 return -EOPNOTSUPP;
8b32580d
OG
1788 }
1789 continue;
1790 }
1791
bbd00f7e
HHZ
1792 if (is_tcf_tunnel_release(a)) {
1793 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
1794 continue;
1795 }
1796
03a9d11e
OG
1797 return -EINVAL;
1798 }
232c0013 1799 return err;
03a9d11e
OG
1800}
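
parse_tc_fdb_actions() folds the whole TC action list into a single attr->action bitmask plus side data (vlan, encap_id, out_rep). Ordering matters only for tunnel_set, which just records the ip_tunnel_info; it is the mirred redirect that follows which adds the encap/forward/count bits and resolves the encap entry. The toy program below shows how such bits compose; the flag values are illustrative, not the real MLX5_FLOW_CONTEXT_ACTION_* encoding:

#include <stdint.h>
#include <stdio.h>

enum {
	ACT_DROP  = 1 << 0,
	ACT_FWD   = 1 << 1,
	ACT_COUNT = 1 << 2,
	ACT_ENCAP = 1 << 3,
	ACT_DECAP = 1 << 4,
};

int main(void)
{
	/* gact shot: drop + count, as in the is_tcf_gact_shot() branch */
	uint32_t drop_rule = ACT_DROP | ACT_COUNT;

	/* tunnel_set then mirred redirect: encap + forward + count */
	uint32_t encap_rule = ACT_ENCAP | ACT_FWD | ACT_COUNT;

	printf("drop=%#x encap=%#x\n", drop_rule, encap_rule);
	return 0;
}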
1801
e3a2b7ed
AV
1802int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
1803 struct tc_cls_flower_offload *f)
1804{
3bc4b7bf 1805 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
17091853 1806 struct mlx5e_tc_flow_parse_attr *parse_attr;
acff797c 1807 struct mlx5e_tc_table *tc = &priv->fs.tc;
3bc4b7bf
OG
1808 struct mlx5e_tc_flow *flow;
1809 int attr_size, err = 0;
65ba8fb7 1810 u8 flow_flags = 0;
e3a2b7ed 1811
65ba8fb7
OG
1812 if (esw && esw->mode == SRIOV_OFFLOADS) {
1813 flow_flags = MLX5E_TC_FLOW_ESWITCH;
1814 attr_size = sizeof(struct mlx5_esw_flow_attr);
3bc4b7bf
OG
1815 } else {
1816 flow_flags = MLX5E_TC_FLOW_NIC;
1817 attr_size = sizeof(struct mlx5_nic_flow_attr);
65ba8fb7 1818 }
e3a2b7ed 1819
65ba8fb7 1820 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
1b9a07ee 1821 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
17091853 1822 if (!parse_attr || !flow) {
e3a2b7ed
AV
1823 err = -ENOMEM;
1824 goto err_free;
1825 }
1826
1827 flow->cookie = f->cookie;
65ba8fb7 1828 flow->flags = flow_flags;
e3a2b7ed 1829
17091853 1830 err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
e3a2b7ed
AV
1831 if (err < 0)
1832 goto err_free;
1833
65ba8fb7 1834 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
d7e75a32 1835 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
adb4c123 1836 if (err < 0)
232c0013 1837 goto err_handle_encap_flow;
aa0cbbae 1838 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
adb4c123 1839 } else {
aa0cbbae 1840 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
adb4c123
OG
1841 if (err < 0)
1842 goto err_free;
aa0cbbae 1843 flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
adb4c123 1844 }
e3a2b7ed 1845
e3a2b7ed
AV
1846 if (IS_ERR(flow->rule)) {
1847 err = PTR_ERR(flow->rule);
aa0cbbae 1848 goto err_free;
e3a2b7ed
AV
1849 }
1850
0b67a38f 1851 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
5c40348c
OG
1852 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1853 tc->ht_params);
1854 if (err)
1855 goto err_del_rule;
1856
232c0013
HHZ
1857 if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
1858 !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
1859 kvfree(parse_attr);
1860 return err;
e3a2b7ed 1861
5c40348c 1862err_del_rule:
5e86397a 1863 mlx5e_tc_del_flow(priv, flow);
e3a2b7ed 1864
232c0013
HHZ
1865err_handle_encap_flow:
1866 if (err == -EAGAIN) {
1867 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1868 tc->ht_params);
1869 if (err)
1870 mlx5e_tc_del_flow(priv, flow);
1871 else
1872 return 0;
1873 }
1874
e3a2b7ed 1875err_free:
17091853 1876 kvfree(parse_attr);
232c0013 1877 kfree(flow);
e3a2b7ed
AV
1878 return err;
1879}
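
mlx5e_configure_flower() sizes a single allocation for both flow types: struct mlx5e_tc_flow ends in a zero-length union (esw_attr[0]/nic_attr[0]) and attr_size decides how many trailing bytes kzalloc() appends. Here is a standalone sketch of that flexible-tail idiom with simplified stand-in types; it relies on the same GCC zero-length-array extension the kernel uses:

#include <stdlib.h>

struct esw_attr { int in_rep, out_rep, vlan, encap_id; };
struct nic_attr { int action, flow_tag; };

struct flow {
	unsigned long cookie;
	unsigned char flags;
	union {				/* tail storage, sized at alloc time */
		struct esw_attr esw[0];
		struct nic_attr nic[0];
	};
};

static struct flow *flow_alloc(int eswitch)
{
	size_t attr_size = eswitch ? sizeof(struct esw_attr)
				   : sizeof(struct nic_attr);

	return calloc(1, sizeof(struct flow) + attr_size);
}

int main(void)
{
	struct flow *f = flow_alloc(1);

	if (!f)
		return 1;
	f->esw[0].vlan = 100;		/* write through the tail union */
	free(f);
	return 0;
}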
1880
1881int mlx5e_delete_flower(struct mlx5e_priv *priv,
1882 struct tc_cls_flower_offload *f)
1883{
1884 struct mlx5e_tc_flow *flow;
acff797c 1885 struct mlx5e_tc_table *tc = &priv->fs.tc;
e3a2b7ed
AV
1886
1887 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1888 tc->ht_params);
1889 if (!flow)
1890 return -EINVAL;
1891
1892 rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1893
961e8979 1894 mlx5e_tc_del_flow(priv, flow);
e3a2b7ed
AV
1895
1896 kfree(flow);
1897
1898 return 0;
1899}
1900
aad7e08d
AV
1901int mlx5e_stats_flower(struct mlx5e_priv *priv,
1902 struct tc_cls_flower_offload *f)
1903{
1904 struct mlx5e_tc_table *tc = &priv->fs.tc;
1905 struct mlx5e_tc_flow *flow;
aad7e08d
AV
1906 struct mlx5_fc *counter;
1907 u64 bytes;
1908 u64 packets;
1909 u64 lastuse;
1910
1911 flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1912 tc->ht_params);
1913 if (!flow)
1914 return -EINVAL;
1915
0b67a38f
HHZ
1916 if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
1917 return 0;
1918
aad7e08d
AV
1919 counter = mlx5_flow_rule_counter(flow->rule);
1920 if (!counter)
1921 return 0;
1922
1923 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1924
d897a638 1925 tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
fed06ee8 1926
aad7e08d
AV
1927 return 0;
1928}
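
Note how the MLX5E_TC_FLOW_OFFLOADED test lets a flow that was parked with -EAGAIN (parsed and cached, but not yet in hardware) simply report nothing, instead of querying a counter for a rule that does not exist yet. A tiny sketch of that gating, with illustrative flag values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_ESWITCH	(1 << 0)
#define FLOW_NIC	(1 << 1)
#define FLOW_OFFLOADED	(1 << 2)

struct flow { uint8_t flags; };

/* Mirrors the early return in mlx5e_stats_flower(): no hardware rule,
 * no counters to read. */
static bool flow_has_hw_counters(const struct flow *f)
{
	return f->flags & FLOW_OFFLOADED;
}

int main(void)
{
	struct flow parked = { .flags = FLOW_ESWITCH };
	struct flow live = { .flags = FLOW_ESWITCH | FLOW_OFFLOADED };

	printf("%d %d\n", flow_has_hw_counters(&parked),
	       flow_has_hw_counters(&live));
	return 0;
}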
1929
e8f887ac
AV
1930static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
1931 .head_offset = offsetof(struct mlx5e_tc_flow, node),
1932 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
1933 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
1934 .automatic_shrinking = true,
1935};
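
The table is keyed directly by the u64 TC cookie embedded in each flow: head_offset, key_offset and key_len tell rhashtable where the hash node and the key live inside the containing struct, so no separate key copy is kept. A userspace sketch of that offset bookkeeping (no rhashtable dependency; names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct node { struct node *next; };

struct tc_flow {
	struct node node;
	uint64_t cookie;
};

struct ht_params {
	size_t head_offset;	/* where the link lives in the object */
	size_t key_offset;	/* where the key lives in the object */
	size_t key_len;
};

static const struct ht_params params = {
	.head_offset = offsetof(struct tc_flow, node),
	.key_offset  = offsetof(struct tc_flow, cookie),
	.key_len     = sizeof(((struct tc_flow *)0)->cookie),
};

/* Recover the key bytes of an object the way rhashtable does:
 * container base plus key_offset. */
static const void *obj_key(const void *obj)
{
	return (const char *)obj + params.key_offset;
}

int main(void)
{
	struct tc_flow f = { .cookie = 0xdeadbeef };
	uint64_t k;

	memcpy(&k, obj_key(&f), params.key_len);
	printf("%#llx\n", (unsigned long long)k);
	return 0;
}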
1936
1937int mlx5e_tc_init(struct mlx5e_priv *priv)
1938{
acff797c 1939 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac
AV
1940
1941 tc->ht_params = mlx5e_tc_flow_ht_params;
1942 return rhashtable_init(&tc->ht, &tc->ht_params);
1943}
1944
1945static void _mlx5e_tc_del_flow(void *ptr, void *arg)
1946{
1947 struct mlx5e_tc_flow *flow = ptr;
1948 struct mlx5e_priv *priv = arg;
1949
961e8979 1950 mlx5e_tc_del_flow(priv, flow);
e8f887ac
AV
1951 kfree(flow);
1952}
1953
1954void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1955{
acff797c 1956 struct mlx5e_tc_table *tc = &priv->fs.tc;
e8f887ac
AV
1957
1958 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1959
acff797c
MG
1960 if (!IS_ERR_OR_NULL(tc->t)) {
1961 mlx5_destroy_flow_table(tc->t);
1962 tc->t = NULL;
e8f887ac
AV
1963 }
1964}
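
mlx5e_tc_cleanup() leans on rhashtable_free_and_destroy() to hand every flow still in the table to _mlx5e_tc_del_flow(), so teardown needs no separate walk. In the sketch below a toy list stands in for the hashtable; as in _mlx5e_tc_del_flow() above, the callback is expected to release each element (here it only counts, since the items live on the stack):

struct item { struct item *next; };

/* Walk the container, handing each element to @free_fn before the
 * container itself goes away; the same shape as
 * rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv). */
static void list_free_and_destroy(struct item **head,
				  void (*free_fn)(void *ptr, void *arg),
				  void *arg)
{
	struct item *it, *next;

	for (it = *head; it; it = next) {
		next = it->next;
		free_fn(it, arg);
	}
	*head = NULL;
}

static void count_free(void *ptr, void *arg)
{
	(void)ptr;		/* real code would kfree() the flow here */
	++*(int *)arg;
}

int main(void)
{
	struct item b = { 0 };
	struct item a = { &b };
	struct item *head = &a;
	int freed = 0;

	list_free_and_destroy(&head, count_free, &freed);
	return freed == 2 ? 0 : 1;
}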