// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

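/* MMIO accessors for the per-PPE register window; all register offsets
 * below are relative to the ioremapped ppe->base.
 */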
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

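/* Sample the hardware keep-alive timestamp, masked to the width of the
 * IB1 timestamp field. The magic 0x0010 offset is assumed to be the frame
 * engine's free-running timestamp register.
 */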
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy\n");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

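/* Software reimplementation of the PPE's entry hash, used to predict which
 * bucket the hardware will pick for a flow. The result is scaled by the
 * SoC's hash_offset (entries per bucket) and wrapped to the table size;
 * unsupported packet types return MTK_PPE_HASH_MASK after a warning.
 */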
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= (ffs(eth->soc->hash_offset) - 1);
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

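/* Initialize a FOE entry in the BIND state for the given packet type,
 * L4 protocol, destination PSE port and MAC pair. For 3-tuple (port-less)
 * flow types the ports field is filled with an 0xa5a5a5xx pad pattern,
 * presumably so the entry hash stays well-defined.
 */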
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int type, int l4proto, u8 pse_port, u8 *src_mac,
			  u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
	} else {
		int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;

		val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
		      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
		      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
		      MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
		entry->ib1 = val;

		val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
		      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
	}

	if (is_multicast_ether_addr(dest_mac))
		val |= mtk_get_ib2_multicast_mask(eth);

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
			       struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
	u32 val = *ib2;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		val &= ~MTK_FOE_IB2_DEST_PORT_V2;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
	} else {
		val &= ~MTK_FOE_IB2_DEST_PORT;
		val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	}
	*ib2 = val;

	return 0;
}

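/* Fill in the IPv4 part of the tuple. For HNAPT entries, @egress selects
 * the translated (new) tuple rather than the original one; for 6RD the
 * addresses describe the IPv4 tunnel endpoints; pure route entries carry
 * no L4 ports.
 */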
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
				 struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

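/* Prepare the entry for egress through an MTK DSA switch port. The etype
 * field doubles as the DSA special tag here; BIT(8) appears to select the
 * VLAN-tagged form of the tag when a VLAN layer is already present.
 */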
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			  int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
		entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);

	return 0;
}

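/* Add a VLAN tag to the entry: the first tag goes into vlan1, a second
 * into vlan2. More than two layers cannot be offloaded and return -ENOSPC.
 */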
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
	case 0:
		entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
			      mtk_prep_ib1_vlan_layer(eth, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);

	if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
	    (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
	l2->pppoe_id = sid;

	return 0;
}

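/* Steer the flow to a wireless device via WDMA. NETSYS_V2 carries the
 * WLAN table index (wcid), BSS and RX queue in dedicated winfo/ib2 fields;
 * earlier revisions pack the same information into the vlan2 field.
 */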
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			   int wdma_idx, int txq, int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
			MTK_FOE_IB2_WDMA_WINFO_V2;
		l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
	} else {
		*ib2 &= ~MTK_FOE_IB2_PORT_MG;
		*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
		if (wdma_idx)
			*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
		l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
			    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
	}

	return 0;
}

int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
			    unsigned int queue)
{
	u32 *ib2 = mtk_foe_entry_ib2(eth, entry);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		*ib2 &= ~MTK_FOE_IB2_QID_V2;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
	} else {
		*ib2 &= ~MTK_FOE_IB2_QID;
		*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
		*ib2 |= MTK_FOE_IB2_PSE_QOS;
	}

	return 0;
}

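/* Compare a software flow against a hardware FOE entry. Only the UDP bit
 * of ib1 and the n-tuple portion of the entry data are compared; state,
 * timestamp and everything from ib2 onwards are ignored.
 */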
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
		     struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);

		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
		dma_wmb();
		mtk_ppe_cache_clear(ppe);
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

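/* Return the entry's idle time in timestamp ticks, computed modulo the
 * IB1 timestamp field width so that counter wraparound between binding
 * and now is handled correctly.
 */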
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	u16 now = mtk_eth_timestamp(ppe->eth);
	u16 timestamp = ib1 & ib1_ts_mask;

	if (timestamp > now)
		return ib1_ts_mask + 1 - timestamp + now;
	else
		return now - timestamp;
}

static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = mtk_foe_get_entry(ppe, cur->hash);
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~ib1_ts_mask;
		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry foe = {};
	struct mtk_foe_entry *hwe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = mtk_foe_get_entry(ppe, entry->hash);
	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

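/* Write an entry to the hardware table at @hash. The entry data is
 * written first and ib1 (which carries the BIND state) last, separated by
 * a write barrier, so the PPE can never observe a bound entry with stale
 * data.
 */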
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_eth *eth = ppe->eth;
	u16 timestamp = mtk_eth_timestamp(eth);
	struct mtk_foe_entry *hwe;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
					 timestamp);
	} else {
		entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
					 timestamp);
	}

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

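/* Commit a flow entry. L2 (bridge) flows only enter the rhashtable; all
 * other flows are queued on their hash bucket's list and written to
 * hardware lazily, once __mtk_ppe_check_skb() sees a packet hitting the
 * matching unbound hardware entry.
 */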
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe = {}, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list,
		       &ppe->foe_flow[hash / soc->hash_offset]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = mtk_foe_get_entry(ppe, hash);
	memcpy(&foe, hwe, soc->foe_entry_size);
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

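/* Called from the RX path for packets whose FOE entry at @hash is not yet
 * bound. Bind a queued flow entry that matches the hardware entry, or
 * fall back to an L2 flow lookup by MAC/VLAN (peeking at the DSA special
 * tag if present) and spawn a per-hash subflow from it.
 */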
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;
	struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		if (!skb_metadata_dst(skb))
			tag += 4;

		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
	if (!ppe)
		return -EINVAL;

	/* disable KA */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
	usleep_range(10000, 11000);

	/* set KA timer to maximum */
	ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
	ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);

	/* set KA tick select */
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
	ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
	usleep_range(10000, 11000);

	/* disable scan mode */
	ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
	usleep_range(10000, 11000);

	return mtk_ppe_wait_busy(ppe);
}

struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
			     int version, int index)
{
	const struct mtk_soc_data *soc = eth->soc;
	struct device *dev = eth->dev;
	struct mtk_ppe *ppe;
	u32 foe_flow_size;
	void *foe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev,
				  MTK_PPE_ENTRIES * soc->foe_entry_size,
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		goto err_free_l2_flows;

	ppe->foe_table = foe;

	foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
			sizeof(*ppe->foe_flow);
	ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
	if (!ppe->foe_flow)
		goto err_free_l2_flows;

	mtk_ppe_debugfs_init(ppe, index);

	return ppe;

err_free_l2_flows:
	rhashtable_destroy(&ppe->l2_flows);
	return NULL;
}

void mtk_ppe_deinit(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
		if (!eth->ppe[i])
			return;
		rhashtable_destroy(&eth->ppe[i]->l2_flows);
	}
}

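/* Zero the FOE table. On MT7621, entries whose slot straddles a 1 KiB
 * boundary are additionally marked static so the hardware never binds a
 * flow to them; this is understood to work around a hardware limitation.
 */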
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0,
	       MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
		for (k = 0; k < ARRAY_SIZE(skip); k++) {
			struct mtk_foe_entry *hwe;

			hwe = mtk_foe_get_entry(ppe, i + skip[k]);
			hwe->ib1 |= MTK_FOE_IB1_STATIC;
		}
	}
}

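/* Program the table base and hardware defaults (aging deltas, bind rate
 * and limits), then enable the PPE.
 */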
void mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	if (!ppe)
		return;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_TB_CFG_INFO_SEL;
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
		val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
		       MTK_PPE_MD_TOAP_BYP_CRSN1 |
		       MTK_PPE_MD_TOAP_BYP_CRSN2 |
		       MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
	else
		val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
		       MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
		ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
		ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
	}
}

int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	if (!ppe)
		return 0;

	for (i = 0; i < MTK_PPE_ENTRIES; i++) {
		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);

		hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
				      MTK_FOE_STATE_INVALID);
	}

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}