// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

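/* For example, BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) above
 * expands to the following pair of accessors:
 *
 *	static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		u32 reg = readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *		return reg;
 *	}
 *	static inline void umac_writel(struct bcm_sysport_priv *priv,
 *				       u32 val, u32 off)
 *	{
 *		writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *	}
 */
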
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

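/* Concretely: with priv->is_lite set, an access at off == RDMA_STATUS is
 * issued at priv->base + SYS_PORT_RDMA_OFFSET + RDMA_STATUS + 4, while
 * registers below RDMA_STATUS keep their full-featured SYSTEMPORT offsets.
 */
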
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

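/* Illustration of the descriptor address split: for a 40-bit DMA address
 * such as addr == 0x1_2345_6789, upper_32_bits(addr) == 0x1 lands in the
 * HI/STATUS_LEN word (masked with DESC_ADDR_HI_MASK) and
 * lower_32_bits(addr) == 0x23456789 in DESC_ADDR_LO. On 32-bit platforms
 * the HI write is compiled out entirely.
 */
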
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_CTAG_TX));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	/* Indicating that software inserts Broadcom tags is needed for the TX
	 * checksum to be computed correctly when using VLAN HW acceleration,
	 * else it has no effect, so it can always be turned on.
	 */
	if (netdev_uses_dsa(dev))
		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
	else
		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

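/* Worked example: bcm_sysport_get_strings() below emits two per-queue
 * strings (txq%d_packets and txq%d_bytes), so NUM_SYSPORT_TXQ_STAT is
 * expected to be 2, and a 16-queue non-Lite device reports
 * BCM_SYSPORT_STATS_LEN + 16 * 2 counters in total.
 */
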
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					unsigned int usecs,
					unsigned int pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

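/* Worked example, assuming the 8.192 us tick documented in
 * bcm_sysport_set_coalesce() below: usecs == 50 programs
 * DIV_ROUND_UP(50 * 1000, 8192) == 7 ticks, i.e. an effective timeout of
 * roughly 57.3 us; rounding up means the hardware never coalesces for
 * less time than requested.
 */
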
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			    RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz; the DMA timeout tick is this reference
	 * clock divided by 1024, which yields roughly 8.192 us. Our maximum
	 * value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

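/* Bounds example: since the comment above documents a 16-bit
 * RING_TIMEOUT_MASK, the largest accepted tx_coalesce_usecs is
 * 0xffff * 8 + 1 == 524281 us; anything above that is rejected with
 * -EINVAL before any register is touched.
 */
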
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware prepends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is still active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

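/* Note on the Lite write above: RDMA_CONS_INDEX packs both indexes into
 * one 32-bit register, producer in the low half (which is what
 * bcm_sysport_desc_rx() reads and masks) and consumer in the high half,
 * hence the "priv->rx_c_index << 16" when completing RX work.
 */
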
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	/* Get VLAN information */
	if (skb_vlan_tag_present(skb)) {
		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		/* Account for the HW inserted VLAN tag */
		if (skb_vlan_tag_present(skb))
			csum_start += VLAN_HLEN;
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	unsigned long flags, desc_flags;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
	if (skb_vlan_tag_present(skb))
		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	reg = 0;
	/* Adjust the packet size calculations if SYSTEMPORT is responsible
	 * for HW insertion of VLAN tags
	 */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

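/* Packing example: MAC address 00:10:18:ab:cd:ef is programmed as
 * mac0 == 0x001018ab and mac1 == 0x0000cdef, i.e. the first four octets
 * big-endian in MAC0 and the last two in the low half of MAC1.
 */
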
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	eth_hw_addr_set(dev, addr->sa_data);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	clk_prepare_enable(priv->clk);

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	spin_lock_init(&priv->desc_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		/* All filters are already in use, we cannot match more rules */
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter if it was enabled, this will
	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return netdev_pick_tx(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return netdev_pick_tx(dev, skb, NULL);

	return tx_ring->index;
}
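/* Worked example (illustrative values): with 4 queues per switch port, a
 * queue_mapping that encodes port 1, queue 2 resolves to
 * ring_map[2 + 1 * 4] = ring_map[6], i.e. the TX ring bound to that
 * port/queue pair by bcm_sysport_map_queues().
 */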
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit = bcm_sysport_xmit,
	.ndo_tx_timeout = bcm_sysport_tx_timeout,
	.ndo_open = bcm_sysport_open,
	.ndo_stop = bcm_sysport_stop,
	.ndo_set_features = bcm_sysport_set_features,
	.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
	.ndo_set_mac_address = bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64 = bcm_sysport_get_stats64,
	.ndo_select_queue = bcm_sysport_select_queue,
};
static int bcm_sysport_map_queues(struct net_device *dev,
				  struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;

	/* We can't be setting up queue inspection for non directly
	 * attached switches
	 */
	if (dp->ds->index)
		return 0;

	port = dp->index;

	/* On SYSTEMPORT Lite we have half as many queues, so a 1:1 mapping
	 * is not possible; we can only do a 2:1 mapping. We achieve that by
	 * halving the number of queues of each per-port (slave_dev) network
	 * device. This needs to happen now, before any slave network device
	 * is used, so that it accurately reflects the number of real TX
	 * queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[qp + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}
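/* Illustrative sizing (assumed figures): with 4 user-facing switch ports at
 * 8 queues each, a 1:1 map needs 32 TX rings, which full SYSTEMPORT
 * provides. SYSTEMPORT Lite only has half as many rings, so halving to
 * 4 queues per port keeps the resulting 2:1 map within 16 rings.
 */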
static int bcm_sysport_unmap_queues(struct net_device *dev,
				    struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;

	port = dp->index;
	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		qp = ring->switch_queue;
		priv->ring_map[qp + port * num_tx_queues] = NULL;
	}

	return 0;
}
static int bcm_sysport_netdevice_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct bcm_sysport_priv *priv;
	int ret = 0;

	priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
	if (priv->netdev != dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		if (dev->netdev_ops != &bcm_sysport_netdev_ops)
			break;
		if (!dsa_slave_dev_check(info->upper_dev))
			break;
		if (info->linking)
			ret = bcm_sysport_map_queues(dev, info->upper_dev);
		else
			ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
		break;
	}

	return notifier_from_errno(ret);
}
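/* Note: notifier_from_errno() translates ret == 0 into NOTIFY_OK and a
 * negative errno into a stop value carrying the error, so a failed queue
 * (un)mapping is reported back through the netdev notifier chain.
 */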
#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};
static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
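/* Hypothetical device-tree node matching the table above (all addresses,
 * sizes and interrupt specifiers are illustrative, not taken from a real
 * board file; the systemport,num-* properties are the ones parsed below):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */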
static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
		return ret;
	}
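	/* The 40-bit mask matches the descriptor format: dma_desc_set_addr()
	 * stores the upper address bits masked by DESC_ADDR_HI_MASK (8 bits,
	 * assuming the usual definition) on top of the low 32, i.e.
	 * 32 + 8 = 40 bits. Platforms that cannot honor that fall back to
	 * 32-bit DMA addressing.
	 */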
	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
		goto err_free_netdev;
	}

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	ret = of_get_phy_mode(dn, &priv->phy_interface);
	/* Default to GMII interface mode */
	if (ret)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	ret = of_get_ethdev_address(dn, dev);
	if (ret) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			 NETIF_F_HW_VLAN_CTAG_TX;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->max_mtu = UMAC_MAX_MTU_SIZE;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk)) {
		ret = PTR_ERR(priv->wol_clk);
		goto err_deregister_fixed_link;
	}

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);
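	/* A note on the headroom (assumed from the TX path): struct bcm_tsb
	 * is the 8-byte transmit status block prepended to frames when
	 * offloads need it; reserving it up front lets the xmit path insert
	 * it without reallocating each skb's headroom.
	 */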
	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;

	ret = register_netdevice_notifier(&priv->netdev_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	clk_prepare_enable(priv->clk);

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s " REV_FMT
		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	clk_disable_unprepare(priv->clk);

	return 0;

err_deregister_notifier:
	unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdevice_notifier(&priv->netdev_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	dev_set_drvdata(&pdev->dev, NULL);
	free_netdev(dev);

	return 0;
}
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);
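	/* Polling budget above: 1000 iterations with a 10 us delay gives
	 * RBUF roughly 1000 * 10 us = 10 ms to signal RBUF_WOL_MODE before
	 * we give up.
	 */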
	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}
static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	bcm_sysport_netif_stop(dev);
	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts) {
		clk_prepare_enable(priv->wol_clk);
		ret = bcm_sysport_suspend_to_wol(priv);
	}

	clk_disable_unprepare(priv->clk);
	return ret;
}
static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	clk_prepare_enable(priv->clk);
	if (priv->wolopts)
		clk_disable_unprepare(priv->wol_clk);

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);
	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	clk_disable_unprepare(priv->clk);
	return ret;
}
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe = bcm_sysport_probe,
	.remove = bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");