1 // SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */
7 #include <net/switchdev.h>
8 #include <linux/if_bridge.h>
9 #include <linux/iopoll.h>
11 #include "sparx5_main_regs.h"
12 #include "sparx5_main.h"
/* Commands for Mac Table Command register */
#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */

/* Commands for MAC_ENTRY_ADDR_TYPE */
#define MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
#define MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
#define MAC_ENTRY_ADDR_TYPE_GLAG             2
#define MAC_ENTRY_ADDR_TYPE_MC_IDX           3

/* Poll interval and timeout used while waiting for a MAC table command
 * (MAC_TABLE_ACCESS_SHOT) to complete.
 */
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
33 struct sparx5_mact_entry {
34 struct list_head list;
35 unsigned char mac[ETH_ALEN];
37 #define MAC_ENT_ALIVE BIT(0)
38 #define MAC_ENT_MOVED BIT(1)
39 #define MAC_ENT_LOCK BIT(2)
44 static int sparx5_mact_get_status(struct sparx5 *sparx5)
46 return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
49 static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
53 return readx_poll_timeout(sparx5_mact_get_status,
55 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
56 TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
59 static void sparx5_mact_select(struct sparx5 *sparx5,
60 const unsigned char mac[ETH_ALEN],
63 u32 macl = 0, mach = 0;
65 /* Set the MAC address to handle and the vlan associated in a format
66 * understood by the hardware.
76 spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
77 spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
80 int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
81 const unsigned char mac[ETH_ALEN], u16 vid)
85 if (pgid < SPX5_PORTS) {
86 type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
88 addr += (pgid / 32) << 5; /* Add upsid */
90 type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
91 addr = pgid - SPX5_PORTS;
94 mutex_lock(&sparx5->lock);
96 sparx5_mact_select(sparx5, mac, vid);
98 /* MAC entry properties */
99 spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
100 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
101 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
102 LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
103 sparx5, LRN_MAC_ACCESS_CFG_2);
104 spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);
106 /* Insert/learn new entry */
107 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
108 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
109 sparx5, LRN_COMMON_ACCESS_CTRL);
111 ret = sparx5_mact_wait_for_completion(sparx5);
113 mutex_unlock(&sparx5->lock);
118 int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
120 struct sparx5_port *port = netdev_priv(dev);
121 struct sparx5 *sparx5 = port->sparx5;
123 return sparx5_mact_forget(sparx5, addr, port->pvid);
126 int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
128 struct sparx5_port *port = netdev_priv(dev);
129 struct sparx5 *sparx5 = port->sparx5;
131 return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
134 static int sparx5_mact_get(struct sparx5 *sparx5,
135 unsigned char mac[ETH_ALEN],
136 u16 *vid, u32 *pcfg2)
138 u32 mach, macl, cfg2;
141 cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
142 if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
143 mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
144 macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
145 mac[0] = ((mach >> 8) & 0xff);
146 mac[1] = ((mach >> 0) & 0xff);
147 mac[2] = ((macl >> 24) & 0xff);
148 mac[3] = ((macl >> 16) & 0xff);
149 mac[4] = ((macl >> 8) & 0xff);
150 mac[5] = ((macl >> 0) & 0xff);
159 bool sparx5_mact_getnext(struct sparx5 *sparx5,
160 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
165 mutex_lock(&sparx5->lock);
167 sparx5_mact_select(sparx5, mac, *vid);
169 spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
170 LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
171 sparx5, LRN_SCAN_NEXT_CFG);
172 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
173 (MAC_CMD_FIND_SMALLEST) |
174 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
175 sparx5, LRN_COMMON_ACCESS_CTRL);
177 ret = sparx5_mact_wait_for_completion(sparx5);
179 ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
184 mutex_unlock(&sparx5->lock);
189 int sparx5_mact_find(struct sparx5 *sparx5,
190 const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
195 mutex_lock(&sparx5->lock);
197 sparx5_mact_select(sparx5, mac, vid);
199 /* Issue a lookup command */
200 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
201 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
202 sparx5, LRN_COMMON_ACCESS_CTRL);
204 ret = sparx5_mact_wait_for_completion(sparx5);
206 cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
207 if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
213 mutex_unlock(&sparx5->lock);
218 int sparx5_mact_forget(struct sparx5 *sparx5,
219 const unsigned char mac[ETH_ALEN], u16 vid)
223 mutex_lock(&sparx5->lock);
225 sparx5_mact_select(sparx5, mac, vid);
227 /* Issue an unlearn command */
228 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
229 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
230 sparx5, LRN_COMMON_ACCESS_CTRL);
232 ret = sparx5_mact_wait_for_completion(sparx5);
234 mutex_unlock(&sparx5->lock);
239 static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
240 const unsigned char *mac,
241 u16 vid, u16 port_index)
243 struct sparx5_mact_entry *mact_entry;
245 mact_entry = devm_kzalloc(sparx5->dev,
246 sizeof(*mact_entry), GFP_ATOMIC);
250 memcpy(mact_entry->mac, mac, ETH_ALEN);
251 mact_entry->vid = vid;
252 mact_entry->port = port_index;
256 static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
257 const unsigned char *mac,
258 u16 vid, u16 port_index)
260 struct sparx5_mact_entry *mact_entry;
261 struct sparx5_mact_entry *res = NULL;
263 mutex_lock(&sparx5->mact_lock);
264 list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
265 if (mact_entry->vid == vid &&
266 ether_addr_equal(mac, mact_entry->mac) &&
267 mact_entry->port == port_index) {
272 mutex_unlock(&sparx5->mact_lock);
277 static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
278 const char *mac, u16 vid,
279 struct net_device *dev, bool offloaded)
281 struct switchdev_notifier_fdb_info info = {};
285 info.offloaded = offloaded;
286 call_switchdev_notifiers(type, dev, &info.info, NULL);
289 int sparx5_add_mact_entry(struct sparx5 *sparx5,
290 struct net_device *dev,
292 const unsigned char *addr, u16 vid)
294 struct sparx5_mact_entry *mact_entry;
298 ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
302 /* In case the entry already exists, don't add it again to SW,
303 * just update HW, but we need to look in the actual HW because
304 * it is possible for an entry to be learn by HW and before the
305 * mact thread to start the frame will reach CPU and the CPU will
306 * add the entry but without the extern_learn flag.
308 mact_entry = find_mact_entry(sparx5, addr, vid, portno);
312 /* Add the entry in SW MAC table not to get the notification when
313 * SW is pulling again
315 mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
319 mutex_lock(&sparx5->mact_lock);
320 list_add_tail(&mact_entry->list, &sparx5->mact_entries);
321 mutex_unlock(&sparx5->mact_lock);
324 ret = sparx5_mact_learn(sparx5, portno, addr, vid);
327 if (mact_entry->flags == 0) {
328 mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
329 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
336 int sparx5_del_mact_entry(struct sparx5 *sparx5,
337 const unsigned char *addr,
340 struct sparx5_mact_entry *mact_entry, *tmp;
342 /* Delete the entry in SW MAC table not to get the notification when
343 * SW is pulling again
345 mutex_lock(&sparx5->mact_lock);
346 list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
348 if ((vid == 0 || mact_entry->vid == vid) &&
349 ether_addr_equal(addr, mact_entry->mac)) {
350 list_del(&mact_entry->list);
351 devm_kfree(sparx5->dev, mact_entry);
353 sparx5_mact_forget(sparx5, addr, mact_entry->vid);
356 mutex_unlock(&sparx5->mact_lock);
361 static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
362 unsigned char mac[ETH_ALEN],
365 struct sparx5_mact_entry *mact_entry;
369 if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
370 MAC_ENTRY_ADDR_TYPE_UPSID_PN)
373 port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
374 if (port >= SPX5_PORTS)
377 if (!test_bit(port, sparx5->bridge_mask))
380 mutex_lock(&sparx5->mact_lock);
381 list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
382 if (mact_entry->vid == vid &&
383 ether_addr_equal(mac, mact_entry->mac)) {
385 mact_entry->flags |= MAC_ENT_ALIVE;
386 if (mact_entry->port != port) {
387 dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
388 mact_entry->port, port);
389 mact_entry->port = port;
390 mact_entry->flags |= MAC_ENT_MOVED;
396 mutex_unlock(&sparx5->mact_lock);
398 if (found && !(mact_entry->flags & MAC_ENT_MOVED))
399 /* Present, not moved */
403 /* Entry not found - now add */
404 mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
408 mact_entry->flags |= MAC_ENT_ALIVE;
409 mutex_lock(&sparx5->mact_lock);
410 list_add_tail(&mact_entry->list, &sparx5->mact_entries);
411 mutex_unlock(&sparx5->mact_lock);
414 /* New or moved entry - notify bridge */
415 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
416 mac, vid, sparx5->ports[port]->ndev,
420 void sparx5_mact_pull_work(struct work_struct *work)
422 struct delayed_work *del_work = to_delayed_work(work);
423 struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
425 struct sparx5_mact_entry *mact_entry, *tmp;
426 unsigned char mac[ETH_ALEN];
431 /* Reset MAC entry flags */
432 mutex_lock(&sparx5->mact_lock);
433 list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
434 mact_entry->flags &= MAC_ENT_LOCK;
435 mutex_unlock(&sparx5->mact_lock);
437 /* MAIN mac address processing loop */
439 memset(mac, 0, sizeof(mac));
441 mutex_lock(&sparx5->lock);
442 sparx5_mact_select(sparx5, mac, vid);
443 spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
444 sparx5, LRN_SCAN_NEXT_CFG);
445 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
446 (MAC_CMD_FIND_SMALLEST) |
447 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
448 sparx5, LRN_COMMON_ACCESS_CTRL);
449 ret = sparx5_mact_wait_for_completion(sparx5);
451 ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
452 mutex_unlock(&sparx5->lock);
454 sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
457 mutex_lock(&sparx5->mact_lock);
458 list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
460 /* If the entry is in HW or permanent, then skip */
461 if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
464 sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
465 mact_entry->mac, mact_entry->vid,
466 sparx5->ports[mact_entry->port]->ndev,
469 list_del(&mact_entry->list);
470 devm_kfree(sparx5->dev, mact_entry);
472 mutex_unlock(&sparx5->mact_lock);
474 queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
475 SPX5_MACT_PULL_DELAY);
478 void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
480 int value = max(1, msecs / 10); /* unit 10 ms */
482 spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
483 LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
484 LRN_AUTOAGE_CFG_UNIT_SIZE |
485 LRN_AUTOAGE_CFG_PERIOD_VAL,
490 void sparx5_mact_init(struct sparx5 *sparx5)
492 mutex_init(&sparx5->lock);
494 /* Flush MAC table */
495 spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
496 LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
497 sparx5, LRN_COMMON_ACCESS_CTRL);
499 if (sparx5_mact_wait_for_completion(sparx5) != 0)
500 dev_warn(sparx5->dev, "MAC flush error\n");
502 sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);