// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

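/* Flow key and action state parsed from a tc flower rule, used to build the
 * hardware FOE (flow offload engine) entry.
 */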
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

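/* Offloaded flows are tracked in a rhashtable keyed by the tc flower cookie. */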
static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

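/* Copy the parsed L3/L4 tuple into the FOE entry. */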
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

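/* Apply an Ethernet header pedit action to the cached header; only the
 * destination and source MAC addresses (offsets 0-8) can be rewritten.
 */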
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

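/* Walk the forward path of @dev; if the last hop is a MediaTek WDMA device
 * (WED), return its index, queue, BSS and WCID so the flow can be offloaded
 * straight to the wireless path.
 */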
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;

	return 0;
}

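/* Translate a TCP/UDP port pedit action into the flow's source/destination
 * port.
 */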
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

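/* Translate an IPv4 address pedit action into the flow's source/destination
 * address.
 */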
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

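/* If @dev is a DSA user port behind a MediaTek-tagged CPU port, rewrite @dev
 * to the DSA master and return the port index; -ENODEV otherwise.
 */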
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_master(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

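/* Program the egress side of the FOE entry: either a WED/WDMA target or one
 * of the PSE GDM ports, including DSA tagging and TX queue selection.
 */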
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = PSE_WDMA0_PORT;
				break;
			case 1:
				pse_port = PSE_WDMA1_PORT;
				break;
			case 2:
				pse_port = PSE_WDMA2_PORT;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

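/* FLOW_CLS_REPLACE: parse the flower match and action rule, build a FOE entry
 * and commit it to the PPE, tracking it in the flow table by cookie.
 */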
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

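	/* META, CONTROL and BASIC dissector keys are mandatory; the address
	 * type selects bridge, IPv4 HNAPT or IPv6 routed offload.
	 */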
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

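	/* First pass over the actions: pick up the redirect target, VLAN and
	 * PPPoE tags, and Ethernet header rewrites.
	 */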
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

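	/* Second pass: apply TCP/UDP port and IPv4 address rewrites (NAT). */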
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

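/* FLOW_CLS_DESTROY: tear down the FOE entry referenced by the flower cookie. */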
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

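/* FLOW_CLS_STATS: report idle time and, when hardware accounting is enabled,
 * packet/byte counters for an offloaded flow.
 */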
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

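/* A single mutex serializes replace/destroy/stats requests across all callers
 * and PPE instances.
 */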
static DEFINE_MUTEX(mtk_flow_offload_mutex);

int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

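/* flow_block callback: forward CLSFLOWER commands from the bound tc block to
 * the PPE offload path on PPE instance 0.
 */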
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

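/* Bind or unbind the flower block callback for ingress offload on this
 * netdev, refcounting the shared block callback.
 */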
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

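/* ndo_setup_tc entry point: only block and flowtable setup types are
 * supported.
 */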
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

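/* Initialise the rhashtable used to track offloaded flows. */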
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}