/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

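/* How often the deferred stats work rereads hardware hit counters for
 * all offloaded flows; see ch_flower_stats_handler() below.
 */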
#define STATS_CHECK_PERIOD (HZ / 2)

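/* Table driving offload_pedit(): each PEDIT_FIELDS() entry records, for
 * one rewritable header field, the width in bytes and the byte offset
 * within the named ch_filter_specification member, so that a pedit
 * write can be memcpy()'d straight into the filter specification.
 */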
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* kzalloc() may fail; initialize the lock only on success and
	 * let the caller handle the NULL return.
	 */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

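/* Translate the match part of a TC flower rule (dissector keys) into a
 * Chelsio filter specification. Matched values land in fs->val with
 * their masks in fs->mask; the NAT fields are primed with the same
 * values so a later pedit action only overwrites what it rewrites.
 */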
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls,
				     struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL means "any protocol", so leave the hardware
		 * ethtype match wildcarded.
		 */
		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		/* The flower port keys are __be16, while the filter spec
		 * stores host-order values, so the correct conversion is
		 * be16_to_cpu(), not cpu_to_be16().
		 */
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. When a VLAN tag is present, the
		 * hardware's ethtype match applies to the ethtype of the
		 * inner header, i.e. the header following the VLAN header.
		 * ivlan_vld is already set above, so if TC supplied
		 * ETH_P_8021Q as the ethtype, clear the ethtype here;
		 * otherwise the hardware would try to match 0x8100 against
		 * the inner ethtype.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

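/* Reject matches the hardware cannot express before committing any
 * resources: only the dissector keys listed below are supported, an IP
 * (tos/ttl) key requires an IPv4/IPv6 ethtype match, and ttl matching
 * cannot be offloaded at all.
 */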
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6\n");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

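/* Apply one pedit write to the filter specification. flow_action mangle
 * follows pedit semantics (new = (old & mask) | val), so val & ~mask is
 * the value actually being written; it is copied into the filter-spec
 * field registered for @field in the pedits[] table above.
 */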
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

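/* Dispatch one pedit action to the right filter-spec field. TC hands
 * pedit writes over as 32-bit words identified by header type and byte
 * offset; a word straddling two fields (the upper DMAC half shares a
 * word with the lower SMAC half) is split by checking which half of the
 * mask is active. Any L3/L4 rewrite switches the filter to
 * NAT_MODE_ALL.
 */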
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

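/* Translate the action part of an already-validated flower rule into
 * the filter spec: accept/drop map to FILTER_PASS/FILTER_DROP, redirect
 * becomes FILTER_SWITCH to the target port, and vlan/pedit actions fill
 * in the rewrite fields consumed by the hardware.
 */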
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct flow_cls_offload *cls,
				       struct ch_filter_specification *fs)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype);
			}
			break;
		default:
			break;
		}
	}
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16 bits (SPORT) or the lower 16 bits (DPORT)
	 * can be set, but not both at once.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}

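/* Sanity-check a single pedit action before accepting the rule: only
 * the header fields enumerated below can be rewritten, and an L4 port
 * write may touch either the source or the destination port, not both
 * at once (valid_l4_mask() is applied to ~mask, i.e. to the bits being
 * modified).
 */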
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
		case PEDIT_IP4_DST:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

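/* Walk all actions of a flower rule and reject anything the hardware
 * cannot perform. Header rewrites (pedit/vlan) only make sense when the
 * packet is also switched out of an egress port, hence the final
 * act_redir check.
 */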
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_REDIRECT: {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

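/* Offload a new flower rule: validate match and actions, build the
 * filter specification, program it into the hardware (waiting on the
 * firmware completion), and on success index the flow by its TC cookie
 * in the adapter's rhashtable so destroy/stats callbacks can find it.
 */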
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for the firmware to acknowledge the filter request */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	/* Check if hw returned error for filter creation */
	ret = ctx.result;
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

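/* Tear down an offloaded flow: remove the hardware filter first, then
 * unlink the entry from the rhashtable and free it via RCU, since
 * concurrent lookups may still be walking the table.
 */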
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed\n");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

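/* Periodic worker, rearmed by its own timer every STATS_CHECK_PERIOD:
 * walk every offloaded flow, reread its hardware hit counters and
 * refresh last_used when the packet count moved. rhashtable_walk_next()
 * returning ERR_PTR(-EAGAIN) means the table was resized underneath the
 * walk, so the walk is simply restarted.
 */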
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

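/* TC stats callback. The hardware counters are cumulative while
 * flow_stats_update() expects increments, so only the delta since the
 * last query is reported.
 */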
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count,
				  ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

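/* Flows are keyed by their TC cookie (tc_flower_cookie) and linked into
 * adap->flower_tbl through the embedded rhash_head node.
 */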
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}