// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_TCP) | \
         BIT(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

#define NFP_FLOWER_MERGE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_TP | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

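/* Overlay of all match fields a merge candidate may match on or set.
 * The vals[] view aliases the field view so the whole structure can be
 * handled with the bitmap helpers (see the bitmap_andnot() call in
 * nfp_flower_can_merge()).
 */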
struct nfp_flower_merge_check {
        union {
                struct {
                        __be16 tci;
                        struct nfp_flower_mac_mpls l2;
                        struct nfp_flower_tp_ports l4;
                        union {
                                struct nfp_flower_ipv4 ipv4;
                                struct nfp_flower_ipv6 ipv6;
                        };
                };
                unsigned long vals[8];
        };
};

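/* Build a control message carrying the flow's metadata, unmasked key, mask
 * and actions, and hand it to the firmware via the control channel.
 */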
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
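         * (e.g. assuming NFP_FL_LW_SIZ == 2, i.e. 4 byte long words,
         * a 40 byte key is sent as 10 long words).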
         */
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}

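/* True if the rule matches on any field above the MAC layer. */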
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

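/* Account for GENEVE options in the key layer and key size, rejecting
 * option lengths the firmware cannot store.
 */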
static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size)
{
        if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
                return -EOPNOTSUPP;

        if (enc_opts->key->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}

static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct tc_cls_flower_offload *flow,
                                enum nfp_flower_tun_type *tun_type)
{
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic basic = { NULL, NULL };
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
                return -EOPNOTSUPP;

        /* If any tun dissector is used then the required set must be used. */
        if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
                return -EOPNOTSUPP;

        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan vlan;

                flow_rule_match_vlan(rule, &vlan);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    vlan.key->vlan_priority)
                        return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_enc_opts enc_op = { NULL, NULL };
                struct flow_match_ipv4_addrs ipv4_addrs;
                struct flow_match_control enc_ctl;
                struct flow_match_ports enc_ports;

                flow_rule_match_enc_control(rule, &enc_ctl);

                if (enc_ctl.mask->addr_type != 0xffff ||
                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                        return -EOPNOTSUPP;

                /* These fields are already verified as used. */
                flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
                if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
                        return -EOPNOTSUPP;

                flow_rule_match_enc_ports(rule, &enc_ports);
                if (enc_ports.mask->dst != cpu_to_be16(~0))
                        return -EOPNOTSUPP;

                if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                        flow_rule_match_enc_opts(rule, &enc_op);

                switch (enc_ports.key->dst) {
                case htons(IANA_VXLAN_UDP_PORT):
                        *tun_type = NFP_FL_TUNNEL_VXLAN;
                        key_layer |= NFP_FLOWER_LAYER_VXLAN;
                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

                        if (enc_op.key)
                                return -EOPNOTSUPP;
                        break;
                case htons(GENEVE_UDP_PORT):
                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
                                return -EOPNOTSUPP;
                        *tun_type = NFP_FL_TUNNEL_GENEVE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

                        if (!enc_op.key)
                                break;
                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
                                return -EOPNOTSUPP;
                        err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
                                                        &key_size);
                        if (err)
                                return err;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* Ensure the ingress netdev matches the expected tun type. */
                if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
                        return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                flow_rule_match_basic(rule, &basic);

        if (basic.mask && basic.mask->n_proto) {
                /* Ethernet type is present in the key. */
                switch (basic.key->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        /* Other ethtype - we need to check the masks for the
                         * remainder of the key to ensure we can offload.
                         */
                        if (nfp_flower_check_higher_than_mac(flow))
                                return -EOPNOTSUPP;
                        break;
                }
        }

        if (basic.mask && basic.mask->ip_proto) {
                /* IP protocol is present in the key. */
                switch (basic.key->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                default:
                        /* Other ip proto - we need to check the masks for the
                         * remainder of the key to ensure we can offload.
                         */
                        return -EOPNOTSUPP;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp tcp;
                u32 tcp_flags;

                flow_rule_match_tcp(rule, &tcp);
                tcp_flags = be16_to_cpu(tcp.key->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
                        return -EOPNOTSUPP;

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
                        return -EOPNOTSUPP;

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!basic.key)
                        return -EOPNOTSUPP;

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (basic.key->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control ctl;

                flow_rule_match_control(rule, &ctl);
                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
                        return -EOPNOTSUPP;
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}

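/* Allocate a flow payload along with key, mask and action buffers sized
 * from the calculated key layer info.
 */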
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->meta.flags = 0;
        INIT_LIST_HEAD(&flow_pay->linked_flows);
        flow_pay->in_hw = false;

        return flow_pay;

err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}

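/* Walk the flow's action list and widen the merge mask with any field an
 * action may rewrite, so that post-action packet contents are accounted
 * for when deciding whether a merge candidate is fully matched.
 */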
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                                     struct nfp_flower_merge_check *merge,
                                     u8 *last_act_id, int *act_out)
{
        struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
        struct nfp_fl_set_ip4_addrs *ipv4_add;
        struct nfp_fl_set_ipv6_addr *ipv6_add;
        struct nfp_fl_push_vlan *push_vlan;
        struct nfp_fl_set_tport *tport;
        struct nfp_fl_set_eth *eth;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;
        u8 act_id = 0;
        u8 *ports;
        int i;

        while (act_off < flow->meta.act_len) {
                a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_OUTPUT:
                        if (act_out)
                                (*act_out)++;
                        break;
                case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
                        push_vlan = (struct nfp_fl_push_vlan *)a;
                        if (push_vlan->vlan_tci)
                                merge->tci = cpu_to_be16(0xffff);
                        break;
                case NFP_FL_ACTION_OPCODE_POP_VLAN:
                        merge->tci = cpu_to_be16(0);
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
                        /* New tunnel header means L2 to L4 can be matched. */
                        eth_broadcast_addr(&merge->l2.mac_dst[0]);
                        eth_broadcast_addr(&merge->l2.mac_src[0]);
                        memset(&merge->l4, 0xff,
                               sizeof(struct nfp_flower_tp_ports));
                        memset(&merge->ipv4, 0xff,
                               sizeof(struct nfp_flower_ipv4));
                        break;
                case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
                        eth = (struct nfp_fl_set_eth *)a;
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_src[i] |=
                                        eth->eth_addr_mask[ETH_ALEN + i];
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
                        ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
                        merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
                        merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
                        ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
                        merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
                        merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
                        ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
                        merge->ipv6.ip_ext.ttl |=
                                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
                        merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
                        merge->ipv6.ipv6_flow_label_exthdr |=
                                ipv6_tc_hl_fl->ipv6_label_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_UDP:
                case NFP_FL_ACTION_OPCODE_SET_TCP:
                        tport = (struct nfp_fl_set_tport *)a;
                        ports = (u8 *)&merge->l4.port_src;
                        for (i = 0; i < 4; i++)
                                ports[i] |= tport->tp_port_mask[i];
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        if (last_act_id)
                *last_act_id = act_id;

        return 0;
}

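/* Extract the match masks of a sub_flow into the merge_check layout,
 * walking the mask data in firmware key layer order.
 */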
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                                struct nfp_flower_merge_check *merge,
                                bool extra_fields)
{
        struct nfp_flower_meta_tci *meta_tci;
        u8 *mask = flow->mask_data;
        u8 key_layer, match_size;

        memset(merge, 0, sizeof(struct nfp_flower_merge_check));

        meta_tci = (struct nfp_flower_meta_tci *)mask;
        key_layer = meta_tci->nfp_flow_key_layer;

        if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
                return -EOPNOTSUPP;

        merge->tci = meta_tci->tci;
        mask += sizeof(struct nfp_flower_meta_tci);

        if (key_layer & NFP_FLOWER_LAYER_EXT_META)
                mask += sizeof(struct nfp_flower_ext_meta);

        mask += sizeof(struct nfp_flower_in_port);

        if (key_layer & NFP_FLOWER_LAYER_MAC) {
                match_size = sizeof(struct nfp_flower_mac_mpls);
                memcpy(&merge->l2, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_TP) {
                match_size = sizeof(struct nfp_flower_tp_ports);
                memcpy(&merge->l4, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4) {
                match_size = sizeof(struct nfp_flower_ipv4);
                memcpy(&merge->ipv4, mask, match_size);
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6) {
                match_size = sizeof(struct nfp_flower_ipv6);
                memcpy(&merge->ipv6, mask, match_size);
        }

        return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
        /* Two flows can be merged if sub_flow2 only matches on bits that are
         * either matched by sub_flow1 or set by a sub_flow1 action. This
         * ensures that every packet that hits sub_flow1 and recirculates is
         * guaranteed to hit sub_flow2.
         */
        struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
        int err, act_out = 0;
        u8 last_act_id = 0;

        err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                                              true);
        if (err)
                return err;

        err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                                              false);
        if (err)
                return err;

        err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                                                   &last_act_id, &act_out);
        if (err)
                return err;

        /* Must only be 1 output action and it must be the last in sequence. */
        if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                return -EOPNOTSUPP;

        /* Reject merge if sub_flow2 matches on something that is not matched
         * on or set in an action by sub_flow1.
         */
        err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                            sub_flow1_merge.vals,
                            sizeof(struct nfp_flower_merge_check) * 8);
        if (err)
                return -EINVAL;

        return 0;
}

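/* Copy any leading pre-actions (pre-tunnel/pre-LAG) from act_src to act_dst,
 * flagging whether a tunnel pre-action was seen. Returns the byte offset of
 * the first non-pre action.
 */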
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                            bool *tunnel_act)
{
        unsigned int act_off = 0, act_len;
        struct nfp_fl_act_head *a;
        u8 act_id = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&act_src[act_off];
                act_len = a->len_lw << NFP_FL_LW_SIZ;
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        if (tunnel_act)
                                *tunnel_act = true;
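                        /* Fall through to copy the pre-action. */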
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                        memcpy(act_dst + act_off, act_src + act_off, act_len);
                        break;
                default:
                        return act_off;
                }

                act_off += act_len;
        }

        return act_off;
}

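/* The firmware pushes the tunnel header on egress, so any action that runs
 * after a tunnel push must be an output; verify that here.
 */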
static int nfp_fl_verify_post_tun_acts(char *acts, int len)
{
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];
                if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                        return -EOPNOTSUPP;

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        return 0;
}

static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
                        struct nfp_fl_payload *sub_flow2,
                        struct nfp_fl_payload *merge_flow)
{
        unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
        bool tunnel_act = false;
        char *merge_act;
        int err;

        /* The last action of sub_flow1 must be output - do not merge this. */
        sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
        sub2_act_len = sub_flow2->meta.act_len;

        if (!sub2_act_len)
                return -EINVAL;

        if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
                return -EINVAL;

        /* A shortcut can only be applied if there is a single action. */
        if (sub1_act_len)
                merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
        else
                merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

        merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
        merge_act = merge_flow->action_data;

        /* Copy any pre-actions to the start of merge flow action list. */
        pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow1->action_data,
                                               sub1_act_len, &tunnel_act);
        merge_act += pre_off1;
        sub1_act_len -= pre_off1;
        pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow2->action_data,
                                               sub2_act_len, NULL);
        merge_act += pre_off2;
        sub2_act_len -= pre_off2;

        /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
         * a tunnel, sub_flow 2 can only have output actions for a valid merge.
         */
        if (tunnel_act) {
                char *post_tun_acts = &sub_flow2->action_data[pre_off2];

                err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
                if (err)
                        return err;
        }

        /* Copy remaining actions from sub_flows 1 and 2. */
        memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
        merge_act += sub1_act_len;
        memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

        return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
        list_del(&link->merge_flow.list);
        list_del(&link->sub_flow.list);
        kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                                    struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
                if (link->sub_flow.flow == sub_flow) {
                        nfp_flower_unlink_flow(link);
                        return;
                }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                                 struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->merge_flow.flow = merge_flow;
        list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
        link->sub_flow.flow = sub_flow;
        list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

        return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
 * @app:        Pointer to the APP handle
 * @sub_flow1:  Initial flow matched to produce merge hint
 * @sub_flow2:  Post recirculation flow matched in merge hint
 *
 * Combines 2 flows (if valid) into a single flow, removing the initial flow
 * from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                     struct nfp_fl_payload *sub_flow1,
                                     struct nfp_fl_payload *sub_flow2)
{
        struct tc_cls_flower_offload merge_tc_off;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
        int err;

        ASSERT_RTNL();

        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;

        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;

        merge_key_ls.key_size = sub_flow1->meta.key_len;

        merge_flow = nfp_flower_allocate_new(&merge_key_ls);
        if (!merge_flow)
                return -ENOMEM;

        merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
        merge_flow->ingress_dev = sub_flow1->ingress_dev;

        memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
               sub_flow1->meta.key_len);
        memcpy(merge_flow->mask_data, sub_flow1->mask_data,
               sub_flow1->meta.mask_len);

        err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow1);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow2);
        if (err)
                goto err_unlink_sub_flow1;

        merge_tc_off.cookie = merge_flow->tc_flower_cookie;
        err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
                                        merge_flow->ingress_dev);
        if (err)
                goto err_unlink_sub_flow2;

        err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
                goto err_remove_rhash;

        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
        nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
        nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        kfree(merge_flow);
        return err;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     netdev structure.
 * @flow:       TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct tc_cls_flower_offload *flow)
{
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
        struct nfp_port *port = NULL;
        int err;

        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;

        err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
                                              &tun_type);
        if (err)
                goto err_free_key_ls;

        flow_pay = nfp_flower_allocate_new(key_layer);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }

        err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
                                            flow_pay, tun_type);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
        if (err)
                goto err_destroy_flow;

        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
        if (err)
                goto err_destroy_flow;

        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        err = nfp_flower_xmit_flow(app, flow_pay,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
        if (err)
                goto err_remove_rhash;

        if (port)
                port->tc_offload_cnt++;

        flow_pay->in_hw = true;

        /* The key layer info is no longer needed; the flow payload itself is
         * deallocated when the flower rule is destroyed.
         */
        kfree(key_layer);

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &flow_pay->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
        kfree(flow_pay->unmasked_data);
        kfree(flow_pay);
err_free_key_ls:
        kfree(key_layer);
        return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
                             struct nfp_fl_payload *del_sub_flow,
                             struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
        struct nfp_fl_payload *origin;
        bool mod = false;
        int err;

        link = list_first_entry(&merge_flow->linked_flows,
                                struct nfp_fl_payload_link, merge_flow.list);
        origin = link->sub_flow.flow;

        /* Re-add rule the merge had overwritten if it has not been deleted. */
        if (origin != del_sub_flow)
                mod = true;

        err = nfp_modify_flow_metadata(app, merge_flow);
        if (err) {
                nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
                goto err_free_links;
        }

        if (!mod) {
                err = nfp_flower_xmit_flow(app, merge_flow,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
                if (err) {
                        nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
                        goto err_free_links;
                }
        } else {
                __nfp_modify_flow_metadata(priv, origin);
                err = nfp_flower_xmit_flow(app, origin,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
                origin->in_hw = true;
        }

err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
                                 merge_flow.list)
                nfp_flower_unlink_flow(link);

        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
                                  struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link, *temp;

        /* Remove any merge flow formed from the deleted sub_flow. */
        list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
                                 sub_flow.list)
                nfp_flower_remove_merge_flow(app, sub_flow,
                                             link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     netdev structure.
 * @flow:       TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
                       struct tc_cls_flower_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *nfp_flow;
        struct nfp_port *port = NULL;
        int err;

        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow)
                return -ENOENT;

        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
                goto err_free_merge_flow;

        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

        if (!nfp_flow->in_hw) {
                err = 0;
                goto err_free_merge_flow;
        }

        err = nfp_flower_xmit_flow(app, nfp_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
        /* Fall through on error. */

err_free_merge_flow:
        nfp_flower_del_linked_merge_flows(app, nfp_flow);
        if (port)
                port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
        kfree(nfp_flow->mask_data);
        kfree(nfp_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &nfp_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(nfp_flow, rcu);
        return err;
}

static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
                                struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link;
        struct nfp_fl_payload *sub_flow;
        u64 pkts, bytes, used;
        u32 ctx_id;

        ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
        pkts = priv->stats[ctx_id].pkts;
        /* Do not cycle subflows if no stats to distribute. */
        if (!pkts)
                return;
        bytes = priv->stats[ctx_id].bytes;
        used = priv->stats[ctx_id].used;

        /* Reset stats for the merge flow. */
        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;

        /* The merge flow has received stats updates from firmware.
         * Distribute these stats to all subflows that form the merge.
         * The stats will be collected from TC via the subflows.
         */
        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
                sub_flow = link->sub_flow.flow;
                ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
                priv->stats[ctx_id].pkts += pkts;
                priv->stats[ctx_id].bytes += bytes;
                priv->stats[ctx_id].used = max_t(u64, used,
                                                 priv->stats[ctx_id].used);
        }
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
                              struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        /* Get merge flows that the subflow forms to distribute their stats. */
        list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
                __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     Netdev structure.
 * @flow:       TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                     struct tc_cls_flower_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *nfp_flow;
        u32 ctx_id;

        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow)
                return -EINVAL;

        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

        spin_lock_bh(&priv->stats_lock);
        /* If request is for a sub_flow, update stats from merged flows. */
        if (!list_empty(&nfp_flow->linked_flows))
                nfp_flower_update_merge_stats(app, nfp_flow);

        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
                          priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;
        spin_unlock_bh(&priv->stats_lock);

        return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                        struct tc_cls_flower_offload *flower)
{
        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;

        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return nfp_flower_add_offload(app, netdev, flower);
        case TC_CLSFLOWER_DESTROY:
                return nfp_flower_del_offload(app, netdev, flower);
        case TC_CLSFLOWER_STATS:
                return nfp_flower_get_stats(app, netdev, flower);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                        void *type_data, void *cb_priv)
{
        struct nfp_repr *repr = cb_priv;

        if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
                                               type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                     struct tc_block_offload *f)
{
        struct nfp_repr *repr = netdev_priv(netdev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             nfp_flower_setup_tc_block_cb,
                                             repr, repr, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        nfp_flower_setup_tc_block_cb,
                                        repr);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                        enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_tc_block(netdev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

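/* Per-netdev state for indirect block offload of devices (e.g. tunnel
 * netdevs) that are not nfp reprs.
 */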
struct nfp_flower_indr_block_cb_priv {
        struct net_device *netdev;
        struct nfp_app *app;
        struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
                                     struct net_device *netdev)
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
                                          void *type_data, void *cb_priv)
{
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
        struct tc_cls_flower_offload *flower = type_data;

        if (flower->common.chain_index)
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(priv->app, priv->netdev,
                                               type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                               struct tc_block_offload *f)
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
        int err;

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
              nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
                if (!cb_priv)
                        return -ENOMEM;

                cb_priv->netdev = netdev;
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);

                err = tcf_block_cb_register(f->block,
                                            nfp_flower_setup_indr_block_cb,
                                            cb_priv, cb_priv, f->extack);
                if (err) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
                }

                return err;
        case TC_BLOCK_UNBIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                if (!cb_priv)
                        return -ENOENT;

                tcf_block_cb_unregister(f->block,
                                        nfp_flower_setup_indr_block_cb,
                                        cb_priv);
                list_del(&cb_priv->list);
                kfree(cb_priv);

                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                            enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
                                                      type_data);
        default:
                return -EOPNOTSUPP;
        }
}

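/* Register (on NETDEV_REGISTER) or unregister (on NETDEV_UNREGISTER) the
 * indirect TC block callback for netdevs the driver can offload, such as
 * tunnel devices without an nfp repr.
 */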
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
                                       struct net_device *netdev,
                                       unsigned long event)
{
        int err;

        if (!nfp_fl_is_netdev_to_offload(netdev))
                return NOTIFY_OK;

        if (event == NETDEV_REGISTER) {
                err = __tc_indr_block_cb_register(netdev, app,
                                                  nfp_flower_indr_setup_tc_cb,
                                                  app);
                if (err)
                        nfp_flower_cmsg_warn(app,
                                             "Indirect block reg failed - %s\n",
                                             netdev->name);
        } else if (event == NETDEV_UNREGISTER) {
                __tc_indr_block_cb_unregister(netdev,
                                              nfp_flower_indr_setup_tc_cb, app);
        }

        return NOTIFY_OK;
}