net/mlx5e: Properly get address type of encapsulation IP headers
linux-2.6-block.git: drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials
 *     provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

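/* One offloaded TC flow, keyed in the driver's rhashtable by the
 * cls_flower cookie. 'attr' is only used for e-switch (FDB) flows,
 * which are allocated with the attr struct appended; NIC flows leave
 * it NULL.
 */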
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

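/* Install a flow in the NIC receive flow table, creating the
 * auto-grouped TC table on first use. When the rule doesn't forward
 * (e.g. drop), a flow counter is created and used as the rule's
 * destination instead of the vlan flow table.
 */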
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

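/* Install a flow in the e-switch FDB offloads table, applying any
 * vlan push/pop actions first.
 */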
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

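/* Unlink a flow from its encap entry; the last flow to go also
 * releases the neighbour reference, frees the HW encap id and the
 * entry itself.
 */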
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

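/* Common delete path for NIC and FDB flows: remove the HW rule, undo
 * e-switch vlan/encap state, free the counter, and tear down the TC
 * table once the last filter is gone.
 */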
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);

	mlx5_del_flow_rules(flow->rule);

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		mlx5_eswitch_del_vlan_action(esw, flow->attr);
		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
			mlx5e_detach_encap(priv, flow);
	}

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

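/* Match VXLAN decap traffic: UDP as the IP protocol plus the VNI from
 * the ENC_KEYID dissector key, if one was given.
 */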
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

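/* Build the outer-header match of a decap flow. The encapsulation
 * address type is taken from the ENC_CONTROL dissector key (see the
 * commit subject above); only IPv4 VXLAN with a known UDP dst port is
 * offloaded.
 */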
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

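/* Translate a cls_flower match into spec->match_criteria/match_value,
 * rejecting dissector keys the device can't match on. *min_inline
 * reports the lowest inline mode (L2/IP/TCP_UDP) the match requires.
 */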
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			netdev_warn(priv->netdev,
				    "IPv6 tunnel decap offload isn't supported\n");
			/* fall through */
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5E_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

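/* Wrapper that also enforces the e-switch min-inline setting for
 * flows added on VF representors (the uplink vport is exempt).
 */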
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && esw->mode == SRIOV_OFFLOADS &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

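/* NIC flows support exactly one action per rule: gact drop (counted
 * when the device has flow counters) or skbedit mark, whose value
 * becomes the flow tag reported back on receive and must fit in
 * MLX5E_TC_FLOW_ID_MASK.
 */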
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

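/* Encap entries are deduplicated in a hash table keyed by the whole
 * mlx5_encap_info, so flows to the same tunnel endpoint share one HW
 * encap id.
 */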
static inline int cmp_encap_info(struct mlx5_encap_info *a,
				 struct mlx5_encap_info *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
	return jhash(info, sizeof(*info), 0);
}

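/* Resolve the IPv4 route and neighbour for the tunnel destination;
 * fails when the egress device isn't on the same HW e-switch as the
 * ingress port.
 */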
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   __be32 *saddr,
				   int *out_ttl)
{
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;

#if IS_ENABLED(CONFIG_INET)
	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
#else
	return -EOPNOTSUPP;
#endif

	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
		ip_rt_put(rt);
		return -EOPNOTSUPP;
	}

	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	/* snapshot the egress device before dropping the route reference */
	*out_dev = rt->dst.dev;
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;

	return 0;
}

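/* Write a static ETH/IP/UDP/VXLAN encap header into 'buf' and return
 * its size; per-packet fields such as the UDP source port and the
 * IP/UDP lengths are left zeroed here.
 */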
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

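/* Resolve route/neighbour for the tunnel destination, snapshot the
 * neighbour MAC, generate the encap header and register it with the
 * device to obtain e->encap_id. Only VXLAN over IPv4 is handled.
 */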
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int encap_size;
	__be32 saddr;
	int ttl;
	int err;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = e->tun_info.tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.daddr = e->tun_info.daddr;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &saddr, &ttl);
	if (err)
		goto out;

	e->n = n;
	e->out_dev = *out_dev;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   e->tun_info.daddr,
						   saddr, e->tun_info.tp_dst,
						   e->tun_info.tun_id);
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

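/* Find or create the encap entry for this tunnel key. New entries get
 * a HW encap id via mlx5e_create_encap_header_ipv4() and are added to
 * the e-switch encap table for reuse.
 */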
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_info info;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	uintptr_t hash_key;
	bool found = false;
	int tunnel_type;
	int err;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		info.tp_dst = key->tp_dst;
		info.tun_id = tunnel_id_to_key32(key->tun_id);
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	switch (family) {
	case AF_INET:
		info.daddr = key->u.ipv4.dst;
		break;
	case AF_INET6:
		netdev_warn(priv->netdev,
			    "IPv6 tunnel encap offload isn't supported\n");
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&info);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info, &info)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

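/* Parse actions for an e-switch flow: drop, redirect to a port on the
 * same HW switch, redirect through a VXLAN encap, vlan push/pop and
 * tunnel decap.
 */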
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

967
e3a2b7ed
AV
968int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
969 struct tc_cls_flower_offload *f)
970{
acff797c 971 struct mlx5e_tc_table *tc = &priv->fs.tc;
e3a2b7ed 972 int err = 0;
776b12b6
OG
973 bool fdb_flow = false;
974 u32 flow_tag, action;
e3a2b7ed 975 struct mlx5e_tc_flow *flow;
c5bb1730 976 struct mlx5_flow_spec *spec;
adb4c123 977 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
e3a2b7ed 978
776b12b6
OG
979 if (esw && esw->mode == SRIOV_OFFLOADS)
980 fdb_flow = true;
981
53636068
RD
982 if (fdb_flow)
983 flow = kzalloc(sizeof(*flow) +
984 sizeof(struct mlx5_esw_flow_attr),
985 GFP_KERNEL);
986 else
987 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
e3a2b7ed 988
c5bb1730
MG
989 spec = mlx5_vzalloc(sizeof(*spec));
990 if (!spec || !flow) {
e3a2b7ed
AV
991 err = -ENOMEM;
992 goto err_free;
993 }
994
995 flow->cookie = f->cookie;
996
c5bb1730 997 err = parse_cls_flower(priv, spec, f);
e3a2b7ed
AV
998 if (err < 0)
999 goto err_free;
1000
776b12b6
OG
1001 if (fdb_flow) {
1002 flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
a54e20b4 1003 err = parse_tc_fdb_actions(priv, f->exts, flow);
adb4c123
OG
1004 if (err < 0)
1005 goto err_free;
776b12b6 1006 flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
adb4c123
OG
1007 } else {
1008 err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
1009 if (err < 0)
1010 goto err_free;
1011 flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
1012 }
e3a2b7ed 1013
e3a2b7ed
AV
1014 if (IS_ERR(flow->rule)) {
1015 err = PTR_ERR(flow->rule);
5c40348c 1016 goto err_free;
e3a2b7ed
AV
1017 }
1018
5c40348c
OG
1019 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1020 tc->ht_params);
1021 if (err)
1022 goto err_del_rule;
1023
e3a2b7ed
AV
1024 goto out;
1025
5c40348c 1026err_del_rule:
74491de9 1027 mlx5_del_flow_rules(flow->rule);
e3a2b7ed
AV
1028
1029err_free:
53636068 1030 kfree(flow);
e3a2b7ed 1031out:
c5bb1730 1032 kvfree(spec);
e3a2b7ed
AV
1033 return err;
1034}
1035
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

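/* Report cached HW counter values (bytes/packets/lastuse) back into
 * the flow's TC action stats.
 */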
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}