/* drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c */
/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

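/* Table mapping each supported pedit field to its width and its offset
 * within struct ch_filter_specification; offload_pedit() below uses it
 * to copy rewritten header values into the filter spec.
 */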
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* kzalloc() can fail; don't touch the lock on a NULL entry */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

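/* Translate the flower match keys supplied by TC into a Chelsio
 * hardware filter specification.  For example (illustrative only,
 * device name assumed), a rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *      dst_ip 10.0.0.1 ip_proto tcp dst_port 80 action drop
 *
 * arrives here as CONTROL, BASIC, IPV4_ADDRS and PORTS keys, whose
 * key/mask pairs are copied into the val/mask members of the spec.
 */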
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		/* flower ports are big endian; the filter spec wants
		 * host order
		 */
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets.  When a VLAN tag is present, the
		 * ethtype field matches the ethtype of the inner header,
		 * i.e. the header following the VLAN header.  So, if TC
		 * supplied ETH_P_8021Q as the ethtype, ivlan_vld already
		 * covers it and the ethtype match must be cleared;
		 * otherwise the hardware would try to match 0x8100
		 * against the inner header's ethtype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

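/* Check whether the requested match can be expressed by the hardware
 * before trying to program it: only a fixed set of dissector keys is
 * supported, an IP key needs an IPv4/IPv6 ethtype, and TTL matching
 * is not offloadable.
 */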
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

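/* Copy the bits a pedit action actually rewrites into the matching
 * rewrite field of the filter spec.  tc pedit semantics are
 * new = (old & mask) | val, so the bits being written are val & ~mask.
 * The destination offset and width come from the pedits[] table; e.g.
 * a full IPv4 source rewrite (mask 0) lands in fs->nat_fip[0..3].
 */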
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

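/* Dispatch one 32-bit pedit write to the right filter-spec field,
 * keyed on the header type and word offset reported by TC.  An
 * (illustrative) action such as
 *
 *   tc ... action pedit ex munge ip dst set 192.168.1.1
 *
 * shows up here as htype FLOW_ACT_MANGLE_HDR_TYPE_IP4 with offset
 * PEDIT_IP4_DST; any L3/L4 rewrite also forces NAT_MODE_ALL so the
 * hardware performs the header rewrite.
 */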
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				fs->nat_fport = val;
			else
				fs->nat_lport = val >> 16;
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

static void cxgb4_process_flow_actions(struct net_device *in,
				       struct flow_cls_offload *cls,
				       struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		default:
			break;
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

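/* Sanity-check a single pedit action.  Only full-field rewrites of
 * the MAC, IPv4/IPv6 address and TCP/UDP port fields listed in
 * pedits[] can be offloaded; anything else (an illustrative example
 * would be "munge ip ttl set 1", whose word offset is none of the
 * cases below) is rejected.
 */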
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

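/* Walk every action in the rule and verify each is offloadable on its
 * own; then enforce the cross-action constraint that pedit/vlan
 * rewrites only make sense when the packet is also redirected out of
 * an egress port on this adapter.
 */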
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

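/* Offload a new flower rule: validate the match and actions, translate
 * them into a filter specification, program the filter (as a hashed,
 * exact-match filter when possible, otherwise at a free TCAM index),
 * wait for the firmware completion and, on success, index the rule by
 * its TC cookie in the rhashtable.
 */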
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

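/* Periodic worker (rescheduled every STATS_CHECK_PERIOD, i.e. twice a
 * second) that walks the flower table and records the last time each
 * filter's packet counter changed, so cxgb4_tc_flower_stats() can
 * report an accurate last_used timestamp to TC.
 */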
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count,
				  ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

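/* The flower table is keyed by the unsigned long cookie TC assigns to
 * each rule (tc_flower_cookie), mapping it back to the driver's
 * per-filter state.
 */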
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}