net/mlx5e: Allow TC csum offload if applied together with pedit action
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;	/* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

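/* Add a flow to the NIC RX flow table (non-eswitch offload): allocate a
 * flow counter and/or a modify-header context as dictated by the parsed
 * actions, create the TC flow table on first use, and install the rule.
 */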
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

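/* Tear down a NIC flow in reverse order of mlx5e_tc_add_nic_flow(); the
 * TC flow table itself is destroyed once the last filter is removed.
 */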
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

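/* Add a flow to the eswitch FDB: the vlan action and (optionally) the
 * modify-header context must be set up before the offloaded rule is
 * installed; the error path unwinds in the opposite order.
 */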
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
	}

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(flow->esw_attr->parse_attr);
	}

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}

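/* The two helpers below are driven by neighbour update events: once the
 * neighbour becomes valid, the cached encap header is pushed to HW and all
 * flows sharing it are (re)offloaded; when it becomes invalid, the flows
 * are removed and the HW encap context is deallocated.
 */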
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}

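/* Poll the cached HW flow counters of all offloaded flows that use this
 * neigh entry; if any of them saw traffic since the last report, ping the
 * neighbour so it is not garbage collected while tunnels still use it.
 */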
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour was already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

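/* Drop this flow's reference to its shared encap entry; the entry itself
 * (including the HW encap context) is freed when the last flow detaches.
 */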
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

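/* Translate flower tunnel match keys (VNI, UDP ports, outer IPv4/IPv6
 * addresses) into an mlx5 match spec; only vxlan decap is supported.
 */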
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

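/* Translate the flower dissector keys into an mlx5 match spec and report
 * the minimal eswitch inline mode the resulting match requires.
 */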
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

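/* Header rewrite (pedit) offload: SW pedit keys are first accumulated into
 * per-command (set/add) mask/value header images, which are then translated
 * field by field into mlx5 modify-header actions via the fields[] map below.
 */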
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can be
 * parsed at most from the SW pedit action. On success, it holds how many HW
 * actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last;
	void *s_masks_p, *a_masks_p, *vals_p;
	u32 s_mask, a_mask, val;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = a_mask = mask = val = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		memcpy(&val, vals_p, f->size);

		field_bsize = f->size * BITS_PER_BYTE;
		first = find_first_bit(&mask, field_bsize);
		last = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1)) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(val));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(val));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, val);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can translate into as many as 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

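/* Per the commit subject above, the csum action is accepted only on top of
 * a pedit action: the HW recalculates the checksums of rewritten headers on
 * its own, so there is no standalone csum offload. An illustrative (not
 * authoritative) tc command matching this pattern could look like:
 *
 *   tc filter add dev <netdev> ingress protocol ip flower ip_proto tcp \
 *      action pedit ex munge ip ttl set 63 pipe csum ip4h and tcp
 */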
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

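/* Build the static encapsulation header template (outer MAC + IP + UDP +
 * VXLAN) that the HW prepends to packets of the offloaded flow; variable
 * fields such as the IPv6 payload length are left for the HW to fill.
 */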
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to look up the neigh hash table
	 * entry when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state, so that if we get a notification when the
	 * neigh changes its validity state, we can find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		neigh_release(n);
		return -EAGAIN;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int err, ttl = 0;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	/* used by mlx5e_detach_encap to look up the neigh hash table
	 * entry when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state, so that if we get a notification when the
	 * neigh changes its validity state, we can find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto out;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		neigh_release(n);
		return -EAGAIN;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
out:
	kfree(encap_header);
	if (n)
		neigh_release(n);
	return err;
}

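/* Attach a flow to an encap entry. Entries are shared: flows with the same
 * tunnel key reuse one entry (and one HW encap_id), found via a hash table
 * keyed on the tunnel key. A new entry triggers a route/neigh lookup and,
 * if the neighbour isn't valid yet, returns -EAGAIN so the flow is
 * offloaded later from the neigh update event.
 */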
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;

	return err;

out_err:
	kfree(e);
	return err;
}

1648
03a9d11e 1649static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
d7e75a32 1650 struct mlx5e_tc_flow_parse_attr *parse_attr,
a54e20b4 1651 struct mlx5e_tc_flow *flow)
03a9d11e 1652{
ecf5bb79 1653 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1d447a39 1654 struct mlx5e_rep_priv *rpriv = priv->ppriv;
a54e20b4 1655 struct ip_tunnel_info *info = NULL;
03a9d11e 1656 const struct tc_action *a;
22dc13c8 1657 LIST_HEAD(actions);
a54e20b4 1658 bool encap = false;
232c0013 1659 int err = 0;
03a9d11e
OG
1660
	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

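		/* csum is offloaded only on top of a pedit (header rewrite)
		 * action: csum_offload_supported() checks that MOD_HDR was
		 * already set, since the HW recomputes checksums then anyway.
		 */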
		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

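		/* Redirects go either to a rep on the same eswitch or, when a
		 * tunnel_key set action came earlier in the action list,
		 * through an encap entry on the mirred device.
		 */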
		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err && err != -EAGAIN)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
				attr->parse_attr = parse_attr;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}

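/* Illustrative only (not part of this file): a filter this parser accepts,
 * combining a header rewrite with checksum recalculation and a redirect,
 * assuming eth0_rep/eth1_rep are VF representors on the same eswitch:
 *
 *   tc filter add dev eth0_rep protocol ip parent ffff: flower ip_proto tcp \
 *	action pedit ex munge ip ttl set 63 pipe \
 *	action csum iph pipe \
 *	action mirred egress redirect dev eth1_rep
 */
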
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

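	/* The attr area trailing struct mlx5e_tc_flow is interpreted per
	 * path: esw_attr[] for eswitch (FDB) flows, nic_attr[] for NIC flows.
	 */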
	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_handle_encap_flow;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

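	/* -EAGAIN from action parsing means an encap neighbour isn't
	 * resolved yet: keep the flow in the hashtable so it gets offloaded
	 * on the neigh update, and report success to the stack.
	 */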
err_handle_encap_flow:
	if (err == -EAGAIN) {
		err = rhashtable_insert_fast(&tc->ht, &flow->node,
					     tc->ht_params);
		if (err)
			mlx5e_tc_del_flow(priv, flow);
		else
			return 0;
	}

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

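	/* tcf_action_stats_update() folds the HW counters into per-cpu
	 * action stats, so stay pinned to one cpu while walking the actions.
	 */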
	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

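/* Illustrative only (not part of this file): the cached HW counters above
 * are what userspace sees when dumping the filter with statistics, e.g.:
 *
 *   tc -s filter show dev eth0_rep parent ffff:
 */
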
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

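/* Flows are hashed by the raw TC cookie, which is how the delete and stats
 * callbacks find the entry created at configure time.
 */
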
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}