/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

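/* Table mapping each supported pedit field to its size and its byte
 * offset within struct ch_filter_specification, so that a pedit write
 * can be memcpy'd into the right filter field (see offload_pedit()).
 */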
static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
        PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
        PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
        PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
        PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
        PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
        PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
        PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
        PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
        PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
        PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
        PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (new)
                spin_lock_init(&new->lock);
        return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
                                                   unsigned long flower_cookie)
{
        return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
                                      adap->flower_ht_params);
}

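/* Translate the flower match from @cls into the hardware filter tuple
 * in @fs: ethertype/IP protocol, IPv4/IPv6 addresses, L4 ports, TOS,
 * tunnel VNI and VLAN TCI. The NAT fields are seeded with the matched
 * values so that a later pedit action only overwrites what it rewrites.
 */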
static void cxgb4_process_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls,
                                     struct ch_filter_specification *fs)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        u16 addr_type = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
                u16 ethtype_key, ethtype_mask;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);

                if (ethtype_key == ETH_P_ALL) {
                        ethtype_key = 0;
                        ethtype_mask = 0;
                }

                if (ethtype_key == ETH_P_IPV6)
                        fs->type = 1;

                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = match.key->ip_proto;
                fs->mask.proto = match.mask->ip_proto;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                fs->type = 0;
                memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                fs->type = 1;
                memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
                       sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
                       sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                /* The flower port keys are big endian __be16; the
                 * hardware filter fields expect host order.
                 */
                fs->val.lport = be16_to_cpu(match.key->dst);
                fs->mask.lport = be16_to_cpu(match.mask->dst);
                fs->val.fport = be16_to_cpu(match.key->src);
                fs->mask.fport = be16_to_cpu(match.mask->src);

                /* also initialize nat_lport/fport to same values */
                fs->nat_lport = fs->val.lport;
                fs->nat_fport = fs->val.fport;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                fs->val.tos = match.key->tos;
                fs->mask.tos = match.mask->tos;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                fs->val.vni = be32_to_cpu(match.key->keyid);
                fs->mask.vni = be32_to_cpu(match.mask->keyid);
                if (fs->mask.vni) {
                        fs->val.encap_vld = 1;
                        fs->mask.encap_vld = 1;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;
                u16 vlan_tci, vlan_tci_mask;

                flow_rule_match_vlan(rule, &match);
                vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
                vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
                                                       VLAN_PRIO_SHIFT);
                fs->val.ivlan = vlan_tci;
                fs->mask.ivlan = vlan_tci_mask;

                fs->val.ivlan_vld = 1;
                fs->mask.ivlan_vld = 1;

                /* Chelsio adapters use the ivlan_vld bit to match VLAN
                 * (802.1Q) packets. When a VLAN tag is present, the
                 * ethtype field is matched against the ethertype of the
                 * inner header, i.e. the header following the VLAN
                 * header. So, for 802.1Q packets, rely on ivlan_vld
                 * (set above) and clear the ethtype value; otherwise
                 * the hardware would try to match the supplied ethtype
                 * against the inner header's ethertype.
                 */
                if (fs->val.ethtype == ETH_P_8021Q) {
                        fs->val.ethtype = 0;
                        fs->mask.ethtype = 0;
                }
        }

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs->val.iport = netdev2pinfo(dev)->port_id;
        fs->mask.iport = ~0;
}

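/* Reject matches the hardware cannot express: any dissector key outside
 * the supported set, an IP key without an IPv4/IPv6 ethertype, or a
 * TTL match, which is not offloadable.
 */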
static int cxgb4_validate_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                u16 eth_ip_type = ethtype_key & ethtype_mask;
                struct flow_match_ip match;

                if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
                        netdev_err(dev, "IP Key supported only with IPv4/v6\n");
                        return -EINVAL;
                }

                flow_rule_match_ip(rule, &match);
                if (match.mask->ttl) {
                        netdev_warn(dev, "ttl match unsupported for offload\n");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

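/* Look up @field in the pedits table and copy the rewritten bits of
 * @val (val & ~mask, since a pedit mask clears the bits being replaced)
 * into the corresponding field of @fs.
 */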
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
                          u8 field)
{
        u32 set_val = val & ~mask;
        u32 offset = 0;
        u8 size = 1;
        int i;

        for (i = 0; i < ARRAY_SIZE(pedits); i++) {
                if (pedits[i].field == field) {
                        offset = pedits[i].offset;
                        size = pedits[i].size;
                        break;
                }
        }
        memcpy((u8 *)fs + offset, &set_val, size);
}

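/* Apply one 32-bit pedit write to the filter spec. @htype selects the
 * header (Ethernet, IPv4, IPv6, TCP or UDP) and @offset the word within
 * it; any L3/L4 rewrite also puts the filter into full NAT mode.
 */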
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                                u32 mask, u32 offset, u8 htype)
{
        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                        fs->newdmac = 1;
                        offload_pedit(fs, val, mask, ETH_DMAC_31_0);
                        break;
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        if (~mask & PEDIT_ETH_DMAC_MASK)
                                offload_pedit(fs, val, mask, ETH_DMAC_47_32);
                        else
                                offload_pedit(fs, val >> 16, mask >> 16,
                                              ETH_SMAC_15_0);
                        break;
                case PEDIT_ETH_SMAC_47_16:
                        fs->newsmac = 1;
                        offload_pedit(fs, val, mask, ETH_SMAC_47_16);
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        offload_pedit(fs, val, mask, IP4_SRC);
                        break;
                case PEDIT_IP4_DST:
                        offload_pedit(fs, val, mask, IP4_DST);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                        offload_pedit(fs, val, mask, IP6_SRC_31_0);
                        break;
                case PEDIT_IP6_SRC_63_32:
                        offload_pedit(fs, val, mask, IP6_SRC_63_32);
                        break;
                case PEDIT_IP6_SRC_95_64:
                        offload_pedit(fs, val, mask, IP6_SRC_95_64);
                        break;
                case PEDIT_IP6_SRC_127_96:
                        offload_pedit(fs, val, mask, IP6_SRC_127_96);
                        break;
                case PEDIT_IP6_DST_31_0:
                        offload_pedit(fs, val, mask, IP6_DST_31_0);
                        break;
                case PEDIT_IP6_DST_63_32:
                        offload_pedit(fs, val, mask, IP6_DST_63_32);
                        break;
                case PEDIT_IP6_DST_95_64:
                        offload_pedit(fs, val, mask, IP6_DST_95_64);
                        break;
                case PEDIT_IP6_DST_127_96:
                        offload_pedit(fs, val, mask, IP6_DST_127_96);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        /* Both ports share one 32-bit pedit word and
                         * nat_fport/nat_lport are host order, so take
                         * the value directly instead of byte swapping
                         * it.
                         */
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                fs->nat_fport = val;
                        else
                                fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                fs->nat_fport = val;
                        else
                                fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
}

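/* Walk the flower actions and fill in the corresponding parts of the
 * filter spec: pass/drop, redirect to an egress port, VLAN
 * pop/push/rewrite, and pedit header rewrites.
 *
 * Example of a rule this handles (hypothetical interface names):
 *   tc filter add dev enp2s0f4 ingress protocol ip flower \
 *       dst_ip 192.168.0.2 \
 *       action vlan push id 10 prio 3 \
 *       action mirred egress redirect dev enp2s0f5
 */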
void cxgb4_process_flow_actions(struct net_device *in,
                                struct flow_action *actions,
                                struct ch_filter_specification *fs)
{
        struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, actions) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                        fs->action = FILTER_PASS;
                        break;
                case FLOW_ACTION_DROP:
                        fs->action = FILTER_DROP;
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out = act->dev;
                        struct port_info *pi = netdev_priv(out);

                        fs->action = FILTER_SWITCH;
                        fs->eport = pi->port_id;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u8 prio = act->vlan.prio;
                        u16 vid = act->vlan.vid;
                        u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                fs->newvlan |= VLAN_REMOVE;
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                                fs->newvlan |= VLAN_INSERT;
                                fs->vlan = vlan_tci;
                                break;
                        case FLOW_ACTION_VLAN_MANGLE:
                                fs->newvlan |= VLAN_REWRITE;
                                fs->vlan = vlan_tci;
                                break;
                        default:
                                break;
                        }
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        u32 mask, val, offset;
                        u8 htype;

                        htype = act->mangle.htype;
                        mask = act->mangle.mask;
                        val = act->mangle.val;
                        offset = act->mangle.offset;

                        process_pedit_field(fs, val, mask, offset, htype);
                        }
                        break;
                default:
                        break;
                }
        }
}

static bool valid_l4_mask(u32 mask)
{
        u16 hi, lo;

        /* Either the upper 16-bits (SPORT) OR the lower
         * 16-bits (DPORT) can be set, but NOT BOTH.
         */
        hi = (mask >> 16) & 0xFFFF;
        lo = mask & 0xFFFF;

        return !(hi && lo);
}

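/* Check a single pedit action against the header fields the hardware
 * can rewrite; for L4 ports, only one of sport/dport may be rewritten
 * per pedit action.
 */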
static bool valid_pedit_action(struct net_device *dev,
                               const struct flow_action_entry *act)
{
        u32 mask, offset;
        u8 htype;

        htype = act->mangle.htype;
        mask = act->mangle.mask;
        offset = act->mangle.offset;

        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                case PEDIT_ETH_SMAC_47_16:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                case PEDIT_IP4_DST:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                case PEDIT_IP6_SRC_63_32:
                case PEDIT_IP6_SRC_95_64:
                case PEDIT_IP6_SRC_127_96:
                case PEDIT_IP6_DST_31_0:
                case PEDIT_IP6_DST_63_32:
                case PEDIT_IP6_DST_95_64:
                case PEDIT_IP6_DST_127_96:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        default:
                netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
                return false;
        }
        return true;
}

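/* Validate the whole action list before committing a filter: a redirect
 * target must be one of this adapter's own ports, VLAN push/rewrite must
 * use the 802.1Q protocol, every pedit must be offloadable, and any
 * pedit/VLAN rewrite must be paired with an egress redirect.
 */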
int cxgb4_validate_flow_actions(struct net_device *dev,
                                struct flow_action *actions)
{
        struct flow_action_entry *act;
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
        int i;

        flow_action_for_each(i, act, actions) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                case FLOW_ACTION_DROP:
                        /* Do nothing */
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct adapter *adap = netdev2adap(dev);
                        struct net_device *n_dev, *target_dev;
                        unsigned int i;
                        bool found = false;

                        target_dev = act->dev;
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (target_dev == n_dev) {
                                        found = true;
                                        break;
                                }
                        }

                        /* If interface doesn't belong to our hw, then
                         * the provided output port is not valid
                         */
                        if (!found) {
                                netdev_err(dev, "%s: Out port invalid\n",
                                           __func__);
                                return -EINVAL;
                        }
                        act_redir = true;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u16 proto = be16_to_cpu(act->vlan.proto);

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                        case FLOW_ACTION_VLAN_MANGLE:
                                if (proto != ETH_P_8021Q) {
                                        netdev_err(dev, "%s: Unsupported vlan proto\n",
                                                   __func__);
                                        return -EOPNOTSUPP;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported vlan action\n",
                                           __func__);
                                return -EOPNOTSUPP;
                        }
                        act_vlan = true;
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        bool pedit_valid = valid_pedit_action(dev, act);

                        if (!pedit_valid)
                                return -EOPNOTSUPP;
                        act_pedit = true;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
                }
        }

        if ((act_pedit || act_vlan) && !act_redir) {
                netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
                           __func__);
                return -EINVAL;
        }

        return 0;
}

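/* Offload one flower rule: validate it, translate the match and actions
 * into a ch_filter_specification, pick a LETCAM index (or use the hash
 * filter region for exact-match rules), program the hardware and, on
 * success, track the rule by its TC cookie in the flower rhashtable.
 *
 * A minimal rule that exercises this path (hypothetical device name):
 *   tc filter add dev enp2s0f4 ingress protocol ip prio 1 flower \
 *       src_ip 10.0.0.1 ip_proto tcp dst_port 80 action drop
 */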
int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct netlink_ext_ack *extack = cls->common.extack;
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int fidx, ret;

        if (cxgb4_validate_flow_actions(dev, &rule->action))
                return -EOPNOTSUPP;

        if (cxgb4_validate_flow_match(dev, cls))
                return -EOPNOTSUPP;

        ch_flower = allocate_flower_entry();
        if (!ch_flower) {
                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
                return -ENOMEM;
        }

        fs = &ch_flower->fs;
        fs->hitcnts = 1;
        cxgb4_process_flow_match(dev, cls, fs);
        cxgb4_process_flow_actions(dev, &rule->action, fs);

        fs->hash = is_filter_exact_match(adap, fs);
        if (fs->hash) {
                fidx = 0;
        } else {
                u8 inet_family;

                inet_family = fs->type ? PF_INET6 : PF_INET;

                /* TC uses prio 0 to mean "let the stack pick a prio
                 * automatically", so it never passes prio 0 to the
                 * driver. The hardware TCAM index, however, starts
                 * from 0. Hence the -1 here.
                 */
                if (cls->common.prio <= adap->tids.nftids)
                        fidx = cls->common.prio - 1;
                else
                        fidx = cxgb4_get_free_ftid(dev, inet_family);

                /* Only insert FLOWER rule if its priority doesn't
                 * conflict with existing rules in the LETCAM.
                 */
                if (fidx < 0 ||
                    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "No free LETCAM index available");
                        ret = -ENOMEM;
                        goto free_entry;
                }
        }

        fs->tc_prio = cls->common.prio;
        fs->tc_cookie = cls->cookie;

        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                goto free_entry;
        }

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret) {
                ret = -ETIMEDOUT;
                goto free_entry;
        }

        ret = ctx.result;
        /* Check if hw returned error for filter creation */
        if (ret)
                goto free_entry;

        ch_flower->tc_flower_cookie = cls->cookie;
        ch_flower->filter_id = ctx.tid;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;

        return 0;

del_filter:
        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
        kfree(ch_flower);
        return ret;
}

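/* Tear down an offloaded flower rule: look it up by TC cookie, delete
 * the hardware filter and remove it from the rhashtable.
 */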
int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;

        ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
        if (ret)
                goto err;

        ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret) {
                netdev_err(dev, "Flow remove from rhashtable failed\n");
                goto err;
        }
        kfree_rcu(ch_flower, rcu);

err:
        return ret;
}

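/* Periodic worker: walk every offloaded flow, read its hardware packet
 * counter and refresh last_used whenever the count has advanced, so TC
 * can age rules accurately. Re-arms flower_stats_timer when done.
 */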
static void ch_flower_stats_handler(struct work_struct *work)
{
        struct adapter *adap = container_of(work, struct adapter,
                                            flower_stats_work);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        struct rhashtable_iter iter;
        u64 packets;
        u64 bytes;
        int ret;

        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((flower_entry = rhashtable_walk_next(&iter)) &&
                       !IS_ERR(flower_entry)) {
                        ret = cxgb4_get_filter_counters(adap->port[0],
                                                        flower_entry->filter_id,
                                                        &packets, &bytes,
                                                        flower_entry->fs.hash);
                        if (!ret) {
                                spin_lock(&flower_entry->lock);
                                ofld_stats = &flower_entry->stats;

                                if (ofld_stats->prev_packet_count != packets) {
                                        ofld_stats->prev_packet_count = packets;
                                        ofld_stats->last_used = jiffies;
                                }
                                spin_unlock(&flower_entry->lock);
                        }
                }

                rhashtable_walk_stop(&iter);

        } while (flower_entry == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
        struct adapter *adap = from_timer(adap, t, flower_stats_timer);

        schedule_work(&adap->flower_stats_work);
}

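/* TC stats callback: read the hardware counters for one flow, report
 * only the delta since the last query via flow_stats_update() and then
 * cache the new totals.
 */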
int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_stats *ofld_stats;
        struct ch_tc_flower_entry *ch_flower;
        u64 packets;
        u64 bytes;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower) {
                ret = -ENOENT;
                goto err;
        }

        ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
                                        &packets, &bytes,
                                        ch_flower->fs.hash);
        if (ret < 0)
                goto err;

        spin_lock_bh(&ch_flower->lock);
        ofld_stats = &ch_flower->stats;
        if (ofld_stats->packet_count != packets) {
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
                                  packets - ofld_stats->packet_count,
                                  ofld_stats->last_used);

                ofld_stats->packet_count = packets;
                ofld_stats->byte_count = bytes;
                ofld_stats->prev_packet_count = packets;
        }
        spin_unlock_bh(&ch_flower->lock);
        return 0;

err:
        return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
        .nelem_hint = 384,
        .head_offset = offsetof(struct ch_tc_flower_entry, node),
        .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
        .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
        .max_size = 524288,
        .min_size = 512,
        .automatic_shrinking = true
};

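/* Set up flower offload state for an adapter: the cookie-keyed
 * rhashtable plus the worker and timer that poll hardware counters
 * every STATS_CHECK_PERIOD jiffies.
 */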
int cxgb4_init_tc_flower(struct adapter *adap)
{
        int ret;

        if (adap->tc_flower_initialized)
                return -EEXIST;

        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
                return ret;

        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
        adap->tc_flower_initialized = true;
        return 0;
}

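/* Undo cxgb4_init_tc_flower(): stop the stats timer and worker before
 * destroying the rhashtable.
 */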
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
        if (!adap->tc_flower_initialized)
                return;

        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
        adap->tc_flower_initialized = false;
}