net/mlx5e: Don't make internal use of errno to denote missing neigh
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#include <net/vxlan.h>
#include <net/gre.h>
#include "lib/vxlan.h"
#include "en/tc_tun.h"

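/* Resolve the route device and the HW forwarding (out) device for an encap
 * destination. Briefly: if the egress device is not on the same HW e-switch,
 * or it is the LAG master above the uplink in SR-IOV LAG mode, traffic must
 * leave through the uplink representor; a VLAN route device also egresses
 * via the uplink, while a plain representor egresses through itself.
 */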
static int get_route_and_out_devs(struct mlx5e_priv *priv,
                                  struct net_device *dev,
                                  struct net_device **route_dev,
                                  struct net_device **out_dev)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_dev, *uplink_upper;
        bool dst_is_lag_dev;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
        dst_is_lag_dev = (uplink_upper &&
                          netif_is_lag_master(uplink_upper) &&
                          dev == uplink_upper &&
                          mlx5_lag_is_sriov(priv->mdev));

        /* if the egress device isn't on the same HW e-switch or
         * it's a LAG device, use the uplink
         */
        if (!netdev_port_same_parent_id(priv->netdev, dev) ||
            dst_is_lag_dev) {
                *route_dev = uplink_dev;
                *out_dev = *route_dev;
        } else {
                *route_dev = dev;
                if (is_vlan_dev(*route_dev))
                        *out_dev = uplink_dev;
                else if (mlx5e_eswitch_rep(dev))
                        *out_dev = *route_dev;
                else
                        return -EOPNOTSUPP;
        }

        return 0;
}

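/* Route the tunnel destination and resolve the next-hop neighbour for IPv4.
 * On success a reference is held on *out_n; the caller must drop it with
 * neigh_release(). A zero *out_ttl on entry means "inherit the hop limit
 * from the route" (ip4_dst_hoplimit()).
 */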
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        int ret;

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;
#else
        return -EOPNOTSUPP;
#endif

        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
        if (ret < 0) {
                ip_rt_put(rt);
                return ret;
        }

        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

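/* rtnl link "kind" string (e.g. "vxlan", "gretap"), used by the
 * unsupported-tunnel warning in mlx5e_tc_tun_parse() below; "" for
 * devices with no rtnl_link_ops.
 */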
static const char *mlx5e_netdev_kind(struct net_device *dev)
{
        if (dev->rtnl_link_ops)
                return dev->rtnl_link_ops->kind;
        else
                return "";
}

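/* IPv6 counterpart of mlx5e_route_lookup_ipv4(): same contract, but the
 * route lookup goes through ipv6_stub so the code also links when IPv6 is
 * built as a module.
 */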
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct net_device **route_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   u8 *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        int ret;

        ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
                                         fl6);
        if (ret < 0)
                return ret;

        if (!(*out_ttl))
                *out_ttl = ip6_dst_hoplimit(dst);

        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
        if (ret < 0) {
                dst_release(dst);
                return ret;
        }
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

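/* Write the UDP + VXLAN part of the encap header:
 *
 *   [ udphdr | vxlanhdr ]
 *
 * Only the UDP dport, the VNI and the VNI-valid flag are set here; the
 * remaining fields stay zero (the buffer is kzalloc'ed) and per-packet
 * fields such as lengths are presumably completed by the HW, as with the
 * IPv6 payload length noted below.
 */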
static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh = (struct vxlanhdr *)
                               ((char *)udp + sizeof(struct udphdr));

        udp->dest = tun_key->tp_dst;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(tun_id);

        return 0;
}

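/* Write the GRE base header. TUNNEL_CSUM and TUNNEL_SEQ are rejected up
 * front since the HW cannot compute GRE checksums or sequence numbers;
 * with TUNNEL_KEY, the key occupies the last 4 bytes of the header length
 * computed by gre_calc_hlen().
 */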
static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
{
        __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
        struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
        int hdr_len;

        /* the HW does not calculate GRE csum or sequences */
        if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
                return -EOPNOTSUPP;

        greh->protocol = htons(ETH_P_TEB);

        /* GRE key */
        hdr_len = gre_calc_hlen(tun_key->tun_flags);
        greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
        if (tun_key->tun_flags & TUNNEL_KEY) {
                __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

                *ptr = tun_id;
        }

        return 0;
}

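/* Dispatch on e->tunnel_type to generate the tunnel part of the encap
 * header at buf, and report the matching outer IP protocol (IPPROTO_UDP
 * for VXLAN, IPPROTO_GRE for gretap) through *ip_proto.
 */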
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
                                      struct mlx5e_encap_entry *e)
{
        struct ip_tunnel_key *key = &e->tun_info.key;
        int err = 0;

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *ip_proto = IPPROTO_UDP;
                err = mlx5e_gen_vxlan_header(buf, key);
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *ip_proto = IPPROTO_GRE;
                err = mlx5e_gen_gre_header(buf, key);
        } else {
                pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
                        e->tunnel_type);
                err = -EOPNOTSUPP;
        }

        return err;
}

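/* Write the outer Ethernet header, inserting an 802.1Q tag when the route
 * device is a VLAN device, and return a pointer to where the IP header
 * starts. The destination MAC comes from the resolved neighbour
 * (e->h_dest); the source MAC is the route device's address.
 */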
static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
                             struct mlx5e_encap_entry *e,
                             u16 proto)
{
        struct ethhdr *eth = (struct ethhdr *)buf;
        char *ip;

        ether_addr_copy(eth->h_dest, e->h_dest);
        ether_addr_copy(eth->h_source, dev->dev_addr);
        if (is_vlan_dev(dev)) {
                struct vlan_hdr *vlan = (struct vlan_hdr *)
                                        ((char *)eth + ETH_HLEN);
                ip = (char *)vlan + VLAN_HLEN;
                eth->h_proto = vlan_dev_vlan_proto(dev);
                vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
                vlan->h_vlan_encapsulated_proto = htons(proto);
        } else {
                eth->h_proto = htons(proto);
                ip = (char *)eth + ETH_HLEN;
        }

        return ip;
}

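/* Build the complete IPv4 encap header for an offloaded tunnel:
 *
 *   [ ethhdr | (vlan) | iphdr | tunnel hdr (vxlan/gre) ]
 *
 * and, if the neighbour is already valid, register the header with the FW
 * via mlx5_packet_reformat_alloc(); otherwise the entry is completed later
 * from the neigh update event handler.
 */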
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        int ipv4_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        struct iphdr *ip;
        int err;

        /* add the IP fields */
        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;
        ttl = tun_key->ttl;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl4, &n, &ttl);
        if (err)
                return err;

        ipv4_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct iphdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv4_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv4_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to look up this entry in the neigh
         * hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state, so that if we get a notification when the
         * neigh changes its validity state we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                             ETH_P_IP);

        /* add ip header */
        ip->tos = tun_key->tos;
        ip->version = 0x4;
        ip->ihl = 0x5;
        ip->ttl = ttl;
        ip->daddr = fl4.daddr;
        ip->saddr = fl4.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
                                         &ip->protocol, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv4_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv4_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

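/* IPv6 variant of mlx5e_tc_tun_create_header_ipv4(): the same flow, with
 * an ipv6hdr in the middle of the encap header.
 */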
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
                                    struct net_device *mirred_dev,
                                    struct mlx5e_encap_entry *e)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        struct net_device *out_dev, *route_dev;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        struct ipv6hdr *ip6h;
        int ipv6_encap_size;
        char *encap_header;
        u8 nud_state, ttl;
        int err;

        ttl = tun_key->ttl;

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
                                      &fl6, &n, &ttl);
        if (err)
                return err;

        ipv6_encap_size =
                (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
                sizeof(struct ipv6hdr) +
                e->tunnel_hlen;

        if (max_encap_size < ipv6_encap_size) {
                mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
                               ipv6_encap_size, max_encap_size);
                err = -EOPNOTSUPP;
                goto out;
        }

        encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
        if (!encap_header) {
                err = -ENOMEM;
                goto out;
        }

        /* used by mlx5e_detach_encap to look up this entry in the neigh
         * hash table when a user deletes a rule
         */
        e->m_neigh.dev = n->dev;
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
        e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * its validity state, so that if we get a notification when the
         * neigh changes its validity state we will find the relevant neigh
         * in the hash.
         */
        err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
        if (err)
                goto free_encap;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(e->h_dest, n->ha);
        read_unlock_bh(&n->lock);

        /* add ethernet header */
        ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
                                                 ETH_P_IPV6);

        /* add ip header */
        ip6_flow_hdr(ip6h, tun_key->tos, 0);
        /* the HW fills up ipv6 payload len */
        ip6h->hop_limit   = ttl;
        ip6h->daddr       = fl6.daddr;
        ip6h->saddr       = fl6.saddr;

        /* add tunneling protocol header */
        err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
                                         &ip6h->nexthdr, e);
        if (err)
                goto destroy_neigh_entry;

        e->encap_size = ipv6_encap_size;
        e->encap_header = encap_header;

        if (!(nud_state & NUD_VALID)) {
                neigh_event_send(n, NULL);
                /* the encap entry will be made valid on neigh update event
                 * and not used before that.
                 */
                goto out;
        }

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         ipv6_encap_size, encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err)
                goto destroy_neigh_entry;

        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
        neigh_release(n);
        return err;

destroy_neigh_entry:
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
        kfree(encap_header);
out:
        if (n)
                neigh_release(n);
        return err;
}

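/* Map a tunnel net device to the driver's tunnel type enum; both gretap
 * and ip6gretap map to the single GRETAP type.
 */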
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
{
        if (netif_is_vxlan(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_VXLAN;
        else if (netif_is_gretap(tunnel_dev) ||
                 netif_is_ip6gretap(tunnel_dev))
                return MLX5E_TC_TUNNEL_TYPE_GRETAP;
        else
                return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
}

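/* A tunnel device can be offloaded only if its type is recognized and the
 * matching FW capability (vxlan_encap_decap / nvgre_encap_decap) is set.
 */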
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
                                    struct net_device *netdev)
{
        int tunnel_type = mlx5e_tc_tun_get_type(netdev);

        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                return true;
        else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
                 MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
                return true;
        else
                return false;
}

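/* Choose the packet reformat type and tunnel header length for the encap
 * entry. For VXLAN, the UDP dport must already be registered with the HW
 * (via the mlx5 vxlan port table); otherwise the encap is rejected.
 */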
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
                                 struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct netlink_ext_ack *extack)
{
        e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);

        if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);

                if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "vxlan udp dport was not registered with the HW");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    dst_port);
                        return -EOPNOTSUPP;
                }
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
                e->tunnel_hlen = VXLAN_HLEN;
        } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
                e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
        } else {
                e->reformat_type = -1;
                e->tunnel_hlen = -1;
                return -EOPNOTSUPP;
        }
        return 0;
}

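/* Translate a flower VXLAN decap match into the mlx5 flow spec: UDP
 * protocol plus dport/sport from the enc_ports key, and the VNI from the
 * enc_keyid key. An exact enc_dst_port match on a registered VXLAN port
 * is mandatory.
 */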
static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
                                    struct mlx5_flow_spec *spec,
                                    struct tc_cls_flower_offload *f,
                                    void *headers_c,
                                    void *headers_v)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        void *misc_c = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param,
                                    spec->match_value,
                                    misc_parameters);
        struct flow_match_ports enc_ports;

        /* Full udp dst port must be given */
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must include enc_dst_port condition");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must include enc_dst_port condition\n");
                return -EOPNOTSUPP;
        }

        flow_rule_match_enc_ports(rule, &enc_ports);

        if (memchr_inv(&enc_ports.mask->dst, 0xff,
                       sizeof(enc_ports.mask->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "VXLAN decap filter must fully match enc_dst_port");
                netdev_warn(priv->netdev,
                            "VXLAN decap filter must fully match enc_dst_port\n");
                return -EOPNOTSUPP;
        }

        /* udp dst port must be known as a VXLAN port */
        if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matched UDP port is not registered as a VXLAN port");
                netdev_warn(priv->netdev,
                            "UDP port %d is not registered as a VXLAN port\n",
                            be16_to_cpu(enc_ports.key->dst));
                return -EOPNOTSUPP;
        }

        /* dst UDP port is valid here */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                 ntohs(enc_ports.mask->dst));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 ntohs(enc_ports.key->dst));

        MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                 ntohs(enc_ports.mask->src));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 ntohs(enc_ports.key->src));

        /* match on VNI */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);

                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(enc_keyid.key->keyid));
        }
        return 0;
}

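/* Translate a flower gretap decap match into the mlx5 flow spec: GRE IP
 * protocol, the TEB (transparent Ethernet bridging) GRE protocol, and
 * optionally the GRE key from the enc_keyid match.
 */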
static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                                     struct mlx5_flow_spec *spec,
                                     struct tc_cls_flower_offload *f,
                                     void *outer_headers_c,
                                     void *outer_headers_v)
{
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

        if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
                NL_SET_ERR_MSG_MOD(f->common.extack,
                                   "GRE HW offloading is not supported");
                netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
                return -EOPNOTSUPP;
        }

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
                 ip_protocol, IPPROTO_GRE);

        /* gre protocol */
        MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);

        /* gre key */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid enc_keyid;

                flow_rule_match_enc_keyid(rule, &enc_keyid);
                MLX5_SET(fte_match_set_misc, misc_c,
                         gre_key.key, be32_to_cpu(enc_keyid.mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v,
                         gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
        }

        return 0;
}

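/* Entry point for parsing tunnel matches: picks the parser by the filter
 * device's tunnel type and reports how deep the match goes (L4 for VXLAN,
 * L3 for gretap) via *match_level.
 */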
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
                       void *headers_v, u8 *match_level)
{
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
                *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
                *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
                netdev_warn(priv->netdev,
                            "decapsulation offload is not supported for %s net device (%d)\n",
                            mlx5e_netdev_kind(filter_dev), tunnel_type);
                return -EOPNOTSUPP;
        }
        return err;
}