// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <asm/cacheflush.h>

#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len);
#define DRIVER_NAME	"fec"

static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

#define FEC_ENET_RSEM_V		0x84
#define FEC_ENET_RSFL_V		16
#define FEC_ENET_RAEM_V		0x8
#define FEC_ENET_RAFL_V		0x8
#define FEC_ENET_OPD_V		0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */
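/* Explanatory note: RSEM/RSFL/RAEM/RAFL are receive FIFO threshold values
 * and OPD is the opcode/pause-duration word; fec_restart() below programs
 * them only when pause-frame flow control is enabled.
 */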
#define FEC_ENET_XDP_PASS	0
#define FEC_ENET_XDP_CONSUMED	BIT(0)
#define FEC_ENET_XDP_TX		BIT(1)
#define FEC_ENET_XDP_REDIR	BIT(2)
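/* Explanatory note: these verdicts returned by fec_enet_run_xdp() are a
 * bitmask rather than an enum so the RX loop can OR the per-packet results
 * together and issue a single xdp_do_flush() per NAPI poll when any packet
 * was redirected (see fec_enet_rx_queue()).
 */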
struct fec_devinfo {
	u32 quirks;
};

static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address from which to
 * read the MAC.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. The worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		BIT(1)
#define FEC_RACC_PRODIS		BIT(2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_ST_C45		(0)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_READ_C45	(3 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_OP_ADDR_WRITE	(0)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
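/* Illustrative example (not from the original source): a clause-22 read of
 * register 1 on PHY address 2 composes the management frame word as
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(2) |
 *	FEC_MMFR_RA(1) | FEC_MMFR_TA
 *	= 0x40000000 | 0x20000000 | 0x01000000 | 0x00040000 | 0x00020000
 *	= 0x61060000
 *
 * which is the word fec_enet_mdio_read_c22() below writes to FEC_MII_DATA.
 */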
/* FEC ECR bits definition */
#define FEC_ECR_RESET		BIT(0)
#define FEC_ECR_ETHEREN		BIT(1)
#define FEC_ECR_MAGICEN		BIT(2)
#define FEC_ECR_SLEEP		BIT(3)
#define FEC_ECR_EN1588		BIT(4)
#define FEC_ECR_BYTESWP		BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP		BIT(0)
#define FEC_RCR_HALFDPX		BIT(1)
#define FEC_RCR_MII		BIT(2)
#define FEC_RCR_PROMISC		BIT(3)
#define FEC_RCR_BC_REJ		BIT(4)
#define FEC_RCR_FLOWCTL		BIT(5)
#define FEC_RCR_RMII		BIT(8)
#define FEC_RCR_10BASET		BIT(9)
/* FEC TXWMRK bits definition */
#define FEC_TXWMRK_STRFWD	BIT(8)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
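/* Explanatory note: IS_TSO_HEADER() tells the TX completion and cleanup
 * paths whether a descriptor's buffer lives inside the pre-mapped per-queue
 * TSO header block (tso_hdrs_dma .. tso_hdrs_dma + ring_size *
 * TSO_HEADER_SIZE), which must not be dma_unmap_single()'d per packet, as
 * opposed to a buffer that was mapped individually at transmit time.
 */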
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}
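/* Explanatory note: the descriptor ring is a flat DMA array walked by
 * pointer arithmetic. bd->dsize is the per-descriptor stride (the size of
 * struct bufdesc for the legacy layout, struct bufdesc_ex for the extended
 * one), and the two helpers above wrap between bd->base and bd->last to
 * form the ring.
 */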
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
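/* Worked example: with ring_size = 512 and dirty_tx == bd.cur, the pointer
 * difference is 0 descriptors, so entries = -1 and the result is
 * -1 + 512 = 511. One slot is effectively held back, which keeps a
 * completely full ring distinguishable from an empty one.
 */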
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}
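/* Explanatory note: swap_buffer() byte-swaps the frame one 32-bit word at a
 * time (len is effectively rounded up to a multiple of 4 by the loop). It
 * is only used on parts with FEC_QUIRK_SWAP_FRAME, e.g. i.MX28, where the
 * DMA block and the CPU disagree on byte order.
 */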
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_buf[index].buf_p);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}
/*
 * Coldfire does not support DMA coherent allocations, and has historically
 * used a band-aid with a manual flush in fec_enet_rx_queue.
 */
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
}
#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_coherent(dev, size, handle, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}
#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
struct fec_dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void fec_dmam_release(struct device *dev, void *res)
{
	struct fec_dma_devres *this = res;

	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
}

static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			    gfp_t gfp)
{
	struct fec_dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;
	vaddr = fec_dma_alloc(dev, size, handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}
	dr->vaddr = vaddr;
	dr->dma_handle = *handle;
	dr->size = size;
	devres_add(dev, dr);
	return vaddr;
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
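/* Explanatory note: the zeroing above is what hands checksumming to the
 * hardware. When BD_ENET_TX_IINS/BD_ENET_TX_PINS are set in the extended
 * descriptor, the MAC computes and inserts the IP and protocol checksums
 * over fields that must start out as zero, so software clears them instead
 * of letting the stack fill them in.
 */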
static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
			  struct fec_enet_priv_rx_q *rxq, int size)
{
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = dev_to_node(&fep->pdev->dev),
		.dev = &fep->pdev->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = FEC_ENET_XDP_HEADROOM,
		.max_len = FEC_ENET_RX_FRSIZE,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}
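/* Explanatory note: each RX slot is backed by one full page from the pool.
 * The descriptor points FEC_ENET_XDP_HEADROOM bytes into the page so an XDP
 * program has room to grow headers, and max_len = FEC_ENET_RX_FRSIZE bounds
 * the area the pool must DMA-sync back to the device. dma_dir is
 * DMA_BIDIRECTIONAL only when an XDP program is attached, since XDP_TX
 * transmits out of the same pages.
 */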
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = skb_frag_address(this_frag);

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp is performed before txq->bd.cur. */
	wmb();
	WRITE_ONCE(txq->bd.cur, bdp);

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_tcp_all_headers(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_OK;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len, total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
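	/* Explanatory note on the quadruple readl() below: on parts with
	 * FEC_QUIRK_ERR007885 a write to the transmit descriptor active
	 * register can be lost if it races with the DMA engine going idle.
	 * The workaround reads the register up to four times and only skips
	 * the kick when every read shows it still set; unaffected parts take
	 * the plain writel() immediately.
	 */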
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	dev_kfree_skb_any(skb);
	return ret;
}
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(txq->tx_buf[i].buf_p);
			} else {
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(page->pp, page, 0, false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = FEC_ECR_ETHEREN;

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so we need to reconfigure them.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt, except MDIO. */
	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		u32 val = readl(fep->hwp + FEC_RACC);

		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= FEC_RCR_RMII;
		else
			rcntl &= ~FEC_RCR_RMII;

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~FEC_RCR_10BASET;
			else
				rcntl |= FEC_RCR_10BASET;
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;

			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_RCR_FLOWCTL;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_RCR_FLOWCTL;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= FEC_ECR_BYTESWP;
		/* enable ENET store and forward mode */
		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= FEC_ECR_EN1588;

	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_txc_dly)
		ecntl |= FEC_ENET_TXC_DLY;
	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_rxc_dly)
		ecntl |= FEC_ENET_RXC_DLY;

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(0, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
		fec_enet_itr_coal_set(ndev);
}
static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
{
	if (!(of_machine_is_compatible("fsl,imx8qm") ||
	      of_machine_is_compatible("fsl,imx8qxp") ||
	      of_machine_is_compatible("fsl,imx8dxl")))
		return 0;

	return imx_scu_get_handle(&fep->ipc_handle);
}

static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
{
	struct device_node *np = fep->pdev->dev.of_node;
	u32 rsrc_id, val;
	int idx;

	if (!np || !fep->ipc_handle)
		return;

	idx = of_alias_get_id(np, "ethernet");
	if (idx < 0)
		idx = 0;
	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;

	val = enabled ? 1 : 0;
	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}

static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;

	if (stop_gpr->gpr) {
		if (enabled)
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit),
					   BIT(stop_gpr->bit));
		else
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit), 0);
	} else if (pdata && pdata->sleep_mode_enable) {
		pdata->sleep_mode_enable(enabled);
	} else {
		fec_enet_ipg_stop_set(fep, enabled);
	}
}
static void fec_irqs_disable(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
}

static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
	} else {
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
	struct fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;
	struct page *page;
	int frame_len;

	fep = netdev_priv(ndev);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].buf_p;
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!skb)
				goto tx_buf_done;
		} else {
			/* Tx processing cannot call any XDP (or page pool) APIs if
			 * the "budget" is 0. Because NAPI is called with a budget of
			 * 0 (such as netpoll) indicates we may be in an IRQ context,
			 * however, we can't use the page pool from IRQ context.
			 */
			if (unlikely(!budget))
				break;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
				xdpf = txq->tx_buf[index].buf_p;
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			} else {
				page = txq->tx_buf[index].buf_p;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (unlikely(!txq->tx_buf[index].buf_p)) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}

			frame_len = fec16_to_cpu(bdp->cbd_datlen);
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			      BD_ENET_TX_RL | BD_ENET_TX_UN |
			      BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += frame_len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply that it is
			 * we who are to time stamp the packet, so we still need to
			 * check the time stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
				skb_tstamp_tx(skb, &shhwtstamps);
			}

			/* Free the sk buffer associated with this last transmit */
			napi_consume_skb(skb, budget);
		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
			xdp_return_frame_rx_napi(xdpf);
		} else { /* recycle pages of XDP_TX frames */
			/* dma_sync_size = 0 because XDP_TX has already synced DMA for_device */
			page_pool_put_page(page->pp, page, 0, true);
		}

		txq->tx_buf[index].buf_p = NULL;
		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;

tx_buf_done:
		/* Make sure the updates to bdp and tx_buf are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_tx_queue_stopped(nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}
static void fec_enet_tx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i, budget);
}
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
				struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
	WARN_ON(!new_page);
	rxq->rx_skb_info[index].page = new_page;

	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
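/* Explanatory note on the RX buffer strategy: the ring always owns one page
 * per slot. fec_enet_update_cbd() replaces the just-received page with a
 * fresh one from the pool *before* the old page is handed to the stack (or
 * to XDP), so the descriptor can be returned to hardware without waiting
 * for the upper layers to release the buffer.
 */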
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail and xdp_adjust_head: the DMA sync for_device
	 * must cover the maximum length the CPU touched.
	 */
	sync = xdp->data_end - xdp->data;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(fep->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_err;

		ret = FEC_ENET_XDP_REDIR;
		break;

	case XDP_TX:
		rxq->stats[RX_XDP_TX]++;
		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
		if (unlikely(err)) {
			rxq->stats[RX_XDP_TX_ERRORS]++;
			goto xdp_err;
		}

		ret = FEC_ENET_XDP_TX;
		break;

	default:
		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough; /* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
xdp_err:
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(rxq->page_pool, page, sync, true);
		if (act != XDP_DROP)
			trace_xdp_exception(fep->netdev, prog, act);
		break;
	}

	return ret;
}
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
	u32 data_start = FEC_ENET_XDP_HEADROOM;
	int cpu = smp_processor_id();
	struct xdp_buff xdp;
	struct page *page;
	u32 sub_len = 4;

#if !defined(CONFIG_M5272)
	/* If it has the FEC_QUIRK_HAS_RACC quirk property, the
	 * FEC_RACC_SHIFT16 bit is set by default in the probe function.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		data_start += 2;
		sub_len += 2;
	}
#endif

#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
	/*
	 * Hacky flush of all caches instead of using the DMA API for the TSO
	 * headers.
	 */
	flush_cache_all();
#endif
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;
	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		page = rxq->rx_skb_info[index].page;
		dma_sync_single_for_cpu(&fep->pdev->dev,
					fec32_to_cpu(bdp->cbd_bufaddr),
					pkt_len,
					DMA_FROM_DEVICE);
		prefetch(page_address(page));
		fec_enet_update_cbd(rxq, bdp, index);

		if (xdp_prog) {
			xdp_buff_clear_frags_flag(&xdp);
			/* subtract 16bit shift and FCS */
			xdp_prepare_buff(&xdp, page_address(page),
					 data_start, pkt_len - sub_len, false);
			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
			xdp_result |= ret;
			if (ret != FEC_ENET_XDP_PASS)
				goto rx_processing_done;
		}

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = build_skb(page_address(page), PAGE_SIZE);
		if (unlikely(!skb)) {
			page_pool_recycle_direct(rxq->page_pool, page);
			ndev->stats.rx_dropped++;

			netdev_err_once(ndev, "build_skb failed!\n");
			goto rx_processing_done;
		}

		skb_reserve(skb, data_start);
		skb_put(skb, pkt_len - sub_len);
		skb_mark_for_recycle(skb);

		if (unlikely(need_swap)) {
			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
			swap_buffer(data, pkt_len);
		}
		data = skb->data;

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		skb_record_rx_queue(skb, queue_id);
		napi_gro_receive(&fep->napi, skb);

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;

	if (xdp_result & FEC_ENET_XDP_REDIR)
		xdp_do_flush();

	return pkt_received;
}
static int fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i, done = 0;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--)
		done += fec_enet_rx_queue(ndev, budget - done, i);

	return done;
}
static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
	uint int_events;

	int_events = readl(fep->hwp + FEC_IEVENT);

	/* Don't clear MDIO events, we poll for those */
	int_events &= ~FEC_ENET_MII;

	writel(int_events, fep->hwp + FEC_IEVENT);

	return int_events != 0;
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	irqreturn_t ret = IRQ_NONE;

	if (fec_enet_collect_events(fep) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable interrupts */
			writel(0, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	return ret;
}
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int done = 0;

	do {
		done += fec_enet_rx(ndev, budget - done);
		fec_enet_tx(ndev, budget);
	} while ((done < budget) && fec_enet_collect_events(fep));

	if (done < budget) {
		napi_complete_done(napi, done);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}

	return done;
}
/* ------------------------------------------------------------------------- */
static int fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned char *iap, tmpaddr[ETH_ALEN];
	int ret;

	/*
	 * try to get the MAC address in the following order:
	 *
	 * 1) module parameter via kernel command line in the form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;

		if (np) {
			ret = of_get_mac_address(np, tmpaddr);
			if (!ret)
				iap = tmpaddr;
			else if (ret == -EPROBE_DEFER)
				return ret;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);

		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
		return 0;
	}

	/* Adjust MAC if using macaddr */
	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);

	return 0;
}

/* ------------------------------------------------------------------------- */
/* The LPI sleep Ts count is based on the tx clock (clk_ref):
 * the LPI sleep count value = X us / cycle_ns.
 */
static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->clk_ref_rate / 1000) / 1000;
}
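/* Worked example (the 125 MHz figure is only illustrative; the real rate
 * comes from fep->clk_ref): with clk_ref at 125 MHz,
 * clk_ref_rate / 1000 = 125000, so 100 us becomes
 * 100 * 125000 / 1000 = 12500 tx clock cycles, the value then written to
 * FEC_LPI_SLEEP/FEC_LPI_WAKE in fec_enet_eee_mode_set() below.
 */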
static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct ethtool_keee *p = &fep->eee;
	unsigned int sleep_cycle, wake_cycle;

	if (enable) {
		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
		wake_cycle = sleep_cycle;
	} else {
		sleep_cycle = 0;
		wake_cycle = 0;
	}

	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);

	return 0;
}
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			netif_stop_queue(ndev);
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_tx_wake_all_queues(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
		if (fep->quirks & FEC_QUIRK_HAS_EEE)
			fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
	} else {
		if (fep->link) {
			netif_stop_queue(ndev);
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}
static int fec_enet_mdio_wait(struct fec_enet_private *fep)
{
	uint ievent;
	int ret;

	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
					ievent & FEC_ENET_MII, 2, 30000);

	if (!ret)
		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	return ret;
}
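/* Explanatory note: MII management events are polled here rather than
 * interrupt-driven. fec_enet_collect_events() deliberately leaves
 * FEC_ENET_MII set in FEC_IEVENT, and only this helper acknowledges it once
 * a management frame completes or the 30 ms timeout expires.
 */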
static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret = 0, frame_start, frame_addr, frame_op;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* C22 read */
	frame_op = FEC_MMFR_OP_READ;
	frame_start = FEC_MMFR_ST;
	frame_addr = regnum;

	/* start a read op */
	writel(frame_start | frame_op |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
				  int devad, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret = 0, frame_start, frame_op;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	frame_start = FEC_MMFR_ST_C45;

	/* write address */
	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
	       FEC_MMFR_TA | (regnum & 0xFFFF),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO address write timeout\n");
		goto out;
	}

	frame_op = FEC_MMFR_OP_READ_C45;

	/* start a read op */
	writel(frame_start | frame_op |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
				   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret, frame_start, frame_addr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* C22 write */
	frame_start = FEC_MMFR_ST;
	frame_addr = regnum;

	/* start a write op */
	writel(frame_start | FEC_MMFR_OP_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret)
		netdev_err(fep->netdev, "MDIO write timeout\n");

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
2240 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2241 int devad, int regnum, u16 value)
2243 struct fec_enet_private *fep = bus->priv;
2244 struct device *dev = &fep->pdev->dev;
2245 int ret, frame_start;
2247 ret = pm_runtime_resume_and_get(dev);
2251 frame_start = FEC_MMFR_ST_C45;
2254 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2255 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2256 FEC_MMFR_TA | (regnum & 0xFFFF),
2257 fep->hwp + FEC_MII_DATA);
2259 /* wait for end of transfer */
2260 ret = fec_enet_mdio_wait(fep);
2262 netdev_err(fep->netdev, "MDIO address write timeout\n");
2266 /* start a write op */
2267 writel(frame_start | FEC_MMFR_OP_WRITE |
2268 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2269 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2270 fep->hwp + FEC_MII_DATA);
2272 /* wait for end of transfer */
2273 ret = fec_enet_mdio_wait(fep);
2275 netdev_err(fep->netdev, "MDIO write timeout\n");
2278 pm_runtime_mark_last_busy(dev);
2279 pm_runtime_put_autosuspend(dev);
2284 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2286 struct fec_enet_private *fep = netdev_priv(ndev);
2287 struct phy_device *phy_dev = ndev->phydev;
2290 phy_reset_after_clk_enable(phy_dev);
2291 } else if (fep->phy_node) {
2293 * If the PHY is not yet bound to the MAC, but an OF PHY node and a
2294 * matching PHY device instance already exist, use the OF PHY node
2295 * to obtain the PHY device instance, and use that instance to
2296 * trigger the PHY reset.
2299 phy_dev = of_phy_find_device(fep->phy_node);
2300 phy_reset_after_clk_enable(phy_dev);
2301 put_device(&phy_dev->mdio.dev);
2305 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2307 struct fec_enet_private *fep = netdev_priv(ndev);
2311 ret = clk_prepare_enable(fep->clk_enet_out);
2316 mutex_lock(&fep->ptp_clk_mutex);
2317 ret = clk_prepare_enable(fep->clk_ptp);
2319 mutex_unlock(&fep->ptp_clk_mutex);
2320 goto failed_clk_ptp;
2322 fep->ptp_clk_on = true;
2324 mutex_unlock(&fep->ptp_clk_mutex);
2327 ret = clk_prepare_enable(fep->clk_ref);
2329 goto failed_clk_ref;
2331 ret = clk_prepare_enable(fep->clk_2x_txclk);
2333 goto failed_clk_2x_txclk;
2335 fec_enet_phy_reset_after_clk_enable(ndev);
2337 clk_disable_unprepare(fep->clk_enet_out);
2339 mutex_lock(&fep->ptp_clk_mutex);
2340 clk_disable_unprepare(fep->clk_ptp);
2341 fep->ptp_clk_on = false;
2342 mutex_unlock(&fep->ptp_clk_mutex);
2344 clk_disable_unprepare(fep->clk_ref);
2345 clk_disable_unprepare(fep->clk_2x_txclk);
2350 failed_clk_2x_txclk:
2352 clk_disable_unprepare(fep->clk_ref);
2355 mutex_lock(&fep->ptp_clk_mutex);
2356 clk_disable_unprepare(fep->clk_ptp);
2357 fep->ptp_clk_on = false;
2358 mutex_unlock(&fep->ptp_clk_mutex);
2361 clk_disable_unprepare(fep->clk_enet_out);
2366 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2367 struct device_node *np)
2369 u32 rgmii_tx_delay, rgmii_rx_delay;
2371 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2372 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2373 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2374 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2376 } else if (rgmii_tx_delay == 2000) {
2377 fep->rgmii_txc_dly = true;
2381 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2382 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2383 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2384 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2386 } else if (rgmii_rx_delay == 2000) {
2387 fep->rgmii_rxc_dly = true;
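/* Illustration only: a hypothetical board device tree that opts in to the
 * 2000ps internal TX delay while keeping the RX path undelayed could carry
 * properties along these lines:
 *
 *	&fec {
 *		phy-mode = "rgmii-txid";
 *		tx-internal-delay-ps = <2000>;
 *		rx-internal-delay-ps = <0>;
 *	};
 *
 * With these values the parser above sets fep->rgmii_txc_dly and leaves
 * fep->rgmii_rxc_dly false.
 */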
2394 static int fec_enet_mii_probe(struct net_device *ndev)
2396 struct fec_enet_private *fep = netdev_priv(ndev);
2397 struct phy_device *phy_dev = NULL;
2398 char mdio_bus_id[MII_BUS_ID_SIZE];
2399 char phy_name[MII_BUS_ID_SIZE + 3];
2401 int dev_id = fep->dev_id;
2403 if (fep->phy_node) {
2404 phy_dev = of_phy_connect(ndev, fep->phy_node,
2405 &fec_enet_adjust_link, 0,
2406 fep->phy_interface);
2408 netdev_err(ndev, "Unable to connect to phy\n");
2412 /* check for attached phy */
2413 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2414 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2418 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2422 if (phy_id >= PHY_MAX_ADDR) {
2423 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2424 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2428 snprintf(phy_name, sizeof(phy_name),
2429 PHY_ID_FMT, mdio_bus_id, phy_id);
2430 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2431 fep->phy_interface);
2434 if (IS_ERR(phy_dev)) {
2435 netdev_err(ndev, "could not attach to PHY\n");
2436 return PTR_ERR(phy_dev);
2439 /* mask with MAC supported features */
2440 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2441 phy_set_max_speed(phy_dev, 1000);
2442 phy_remove_link_mode(phy_dev,
2443 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2444 #if !defined(CONFIG_M5272)
2445 phy_support_sym_pause(phy_dev);
2449 phy_set_max_speed(phy_dev, 100);
2451 if (fep->quirks & FEC_QUIRK_HAS_EEE)
2452 phy_support_eee(phy_dev);
2455 fep->full_duplex = 0;
2457 phy_dev->mac_managed_pm = true;
2459 phy_attached_info(phy_dev);
2464 static int fec_enet_mii_init(struct platform_device *pdev)
2466 static struct mii_bus *fec0_mii_bus;
2467 struct net_device *ndev = platform_get_drvdata(pdev);
2468 struct fec_enet_private *fep = netdev_priv(ndev);
2469 bool suppress_preamble = false;
2470 struct device_node *node;
2472 u32 mii_speed, holdtime;
2476 * The i.MX28 dual fec interfaces are not equal.
2477 * Here are the differences:
2479 * - fec0 supports MII & RMII modes while fec1 only supports RMII
2480 * - fec0 acts as the 1588 time master while fec1 is slave
2481 * - external phys can only be configured by fec0
2483 * That is to say, fec1 cannot work independently: it only works
2484 * when fec0 is working. The reason behind this design is that the
2485 * second interface was added primarily for switch mode.
2487 * Because of the last point above, both phys are attached on fec0
2488 * mdio interface in board design, and need to be configured by
2491 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2492 /* fec1 uses fec0 mii_bus */
2493 if (mii_cnt && fec0_mii_bus) {
2494 fep->mii_bus = fec0_mii_bus;
2501 bus_freq = 2500000; /* 2.5MHz by default */
2502 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2504 of_property_read_u32(node, "clock-frequency", &bus_freq);
2505 suppress_preamble = of_property_read_bool(node,
2506 "suppress-preamble");
2510 * Set MII speed (MDC = ref clock / (2 x MII_SPEED), see below)
2512 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)', while
2513 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2514 * Reference Manual has this wrong; the i.MX6Q manual is correct.
2517 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2518 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2520 if (mii_speed > 63) {
2522 "fec clock (%lu) too fast to get right mii speed\n",
2523 clk_get_rate(fep->clk_ipg));
2529 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2530 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2531 * versions are RAZ there, so just ignore the difference and write the value anyway.
2533 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2534 * HOLDTIME + 1 is the number of clk cycles the fec holds the MDIO output.
2536 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2537 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2538 * holdtime cannot result in a value greater than 3.
2540 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2542 fep->phy_speed = mii_speed << 1 | holdtime << 8;
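	/* Worked example, assuming a hypothetical 66 MHz ipg clock and the
	 * default 2.5 MHz bus_freq on an ENET-MAC (i.MX6-style) core:
	 *
	 *   mii_speed = DIV_ROUND_UP(66000000, 2500000 * 2)   = 14
	 *   mii_speed--                  (ENET-MAC formula)   = 13
	 *   MDC       = 66000000 / ((13 + 1) * 2)            ~= 2.36 MHz
	 *   holdtime  = DIV_ROUND_UP(66000000, 100000000) - 1 = 0
	 *               (one clock of hold, ~15 ns >= the 10 ns minimum)
	 *
	 * giving phy_speed = 13 << 1 | 0 << 8 = 0x1a.
	 */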
2544 if (suppress_preamble)
2545 fep->phy_speed |= BIT(7);
2547 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2548 /* Clear MMFR to avoid generating an MII event when writing MSCR.
2549 * MII event generation condition:
2551 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2552 * mscr_reg_data_in[7:0] != 0
2554 * - mscr[7:0]_not_zero
2556 writel(0, fep->hwp + FEC_MII_DATA);
2559 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2561 /* Clear any pending transaction complete indication */
2562 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2564 fep->mii_bus = mdiobus_alloc();
2565 if (fep->mii_bus == NULL) {
2570 fep->mii_bus->name = "fec_enet_mii_bus";
2571 fep->mii_bus->read = fec_enet_mdio_read_c22;
2572 fep->mii_bus->write = fec_enet_mdio_write_c22;
2573 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2574 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2575 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2577 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2578 pdev->name, fep->dev_id + 1);
2579 fep->mii_bus->priv = fep;
2580 fep->mii_bus->parent = &pdev->dev;
2582 err = of_mdiobus_register(fep->mii_bus, node);
2584 goto err_out_free_mdiobus;
2589 /* save fec0 mii_bus */
2590 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2591 fec0_mii_bus = fep->mii_bus;
2595 err_out_free_mdiobus:
2596 mdiobus_free(fep->mii_bus);
2602 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2604 if (--mii_cnt == 0) {
2605 mdiobus_unregister(fep->mii_bus);
2606 mdiobus_free(fep->mii_bus);
2610 static void fec_enet_get_drvinfo(struct net_device *ndev,
2611 struct ethtool_drvinfo *info)
2613 struct fec_enet_private *fep = netdev_priv(ndev);
2615 strscpy(info->driver, fep->pdev->dev.driver->name,
2616 sizeof(info->driver));
2617 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2620 static int fec_enet_get_regs_len(struct net_device *ndev)
2622 struct fec_enet_private *fep = netdev_priv(ndev);
2626 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2628 s = resource_size(r);
2633 /* List of registers that can safely be read to dump them with ethtool */
2634 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2635 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2636 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2637 static __u32 fec_enet_register_version = 2;
2638 static u32 fec_enet_register_offset[] = {
2639 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2640 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2641 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2642 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2643 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2644 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2645 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2646 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2647 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2648 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2649 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2650 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2651 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2652 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2653 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2654 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2655 RMON_T_P_GTE2048, RMON_T_OCTETS,
2656 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2657 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2658 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2659 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2660 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2661 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2662 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2663 RMON_R_P_GTE2048, RMON_R_OCTETS,
2664 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2665 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2668 static u32 fec_enet_register_offset_6ul[] = {
2669 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2670 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2671 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2672 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2673 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2674 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2675 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2676 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2677 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2678 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2679 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2680 RMON_T_P_GTE2048, RMON_T_OCTETS,
2681 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2682 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2683 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2684 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2685 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2686 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2687 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2688 RMON_R_P_GTE2048, RMON_R_OCTETS,
2689 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2690 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2693 static __u32 fec_enet_register_version = 1;
2694 static u32 fec_enet_register_offset[] = {
2695 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2696 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2697 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2698 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2699 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2700 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2701 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2702 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2703 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2707 static void fec_enet_get_regs(struct net_device *ndev,
2708 struct ethtool_regs *regs, void *regbuf)
2710 struct fec_enet_private *fep = netdev_priv(ndev);
2711 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2712 struct device *dev = &fep->pdev->dev;
2713 u32 *buf = (u32 *)regbuf;
2716 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2717 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2718 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2722 if (!of_machine_is_compatible("fsl,imx6ul")) {
2723 reg_list = fec_enet_register_offset;
2724 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2726 reg_list = fec_enet_register_offset_6ul;
2727 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2731 static u32 *reg_list = fec_enet_register_offset;
2732 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2734 ret = pm_runtime_resume_and_get(dev);
2738 regs->version = fec_enet_register_version;
2740 memset(buf, 0, regs->len);
2742 for (i = 0; i < reg_cnt; i++) {
2745 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2746 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2750 buf[off] = readl(&theregs[off]);
2753 pm_runtime_mark_last_busy(dev);
2754 pm_runtime_put_autosuspend(dev);
2757 static int fec_enet_get_ts_info(struct net_device *ndev,
2758 struct ethtool_ts_info *info)
2760 struct fec_enet_private *fep = netdev_priv(ndev);
2762 if (fep->bufdesc_ex) {
2764 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2765 SOF_TIMESTAMPING_RX_SOFTWARE |
2766 SOF_TIMESTAMPING_SOFTWARE |
2767 SOF_TIMESTAMPING_TX_HARDWARE |
2768 SOF_TIMESTAMPING_RX_HARDWARE |
2769 SOF_TIMESTAMPING_RAW_HARDWARE;
2771 info->phc_index = ptp_clock_index(fep->ptp_clock);
2773 info->phc_index = -1;
2775 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2776 (1 << HWTSTAMP_TX_ON);
2778 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2779 (1 << HWTSTAMP_FILTER_ALL);
2782 return ethtool_op_get_ts_info(ndev, info);
2786 #if !defined(CONFIG_M5272)
2788 static void fec_enet_get_pauseparam(struct net_device *ndev,
2789 struct ethtool_pauseparam *pause)
2791 struct fec_enet_private *fep = netdev_priv(ndev);
2793 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2794 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2795 pause->rx_pause = pause->tx_pause;
2798 static int fec_enet_set_pauseparam(struct net_device *ndev,
2799 struct ethtool_pauseparam *pause)
2801 struct fec_enet_private *fep = netdev_priv(ndev);
2806 if (pause->tx_pause != pause->rx_pause) {
2808 "hardware only support enable/disable both tx and rx");
2812 fep->pause_flag = 0;
2814 /* tx pause must be the same as rx pause */
2815 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2816 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2818 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2821 if (pause->autoneg) {
2822 if (netif_running(ndev))
2824 phy_start_aneg(ndev->phydev);
2826 if (netif_running(ndev)) {
2827 napi_disable(&fep->napi);
2828 netif_tx_lock_bh(ndev);
2830 netif_tx_wake_all_queues(ndev);
2831 netif_tx_unlock_bh(ndev);
2832 napi_enable(&fep->napi);
2838 static const struct fec_stat {
2839 char name[ETH_GSTRING_LEN];
2843 { "tx_dropped", RMON_T_DROP },
2844 { "tx_packets", RMON_T_PACKETS },
2845 { "tx_broadcast", RMON_T_BC_PKT },
2846 { "tx_multicast", RMON_T_MC_PKT },
2847 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2848 { "tx_undersize", RMON_T_UNDERSIZE },
2849 { "tx_oversize", RMON_T_OVERSIZE },
2850 { "tx_fragment", RMON_T_FRAG },
2851 { "tx_jabber", RMON_T_JAB },
2852 { "tx_collision", RMON_T_COL },
2853 { "tx_64byte", RMON_T_P64 },
2854 { "tx_65to127byte", RMON_T_P65TO127 },
2855 { "tx_128to255byte", RMON_T_P128TO255 },
2856 { "tx_256to511byte", RMON_T_P256TO511 },
2857 { "tx_512to1023byte", RMON_T_P512TO1023 },
2858 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2859 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2860 { "tx_octets", RMON_T_OCTETS },
2863 { "IEEE_tx_drop", IEEE_T_DROP },
2864 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2865 { "IEEE_tx_1col", IEEE_T_1COL },
2866 { "IEEE_tx_mcol", IEEE_T_MCOL },
2867 { "IEEE_tx_def", IEEE_T_DEF },
2868 { "IEEE_tx_lcol", IEEE_T_LCOL },
2869 { "IEEE_tx_excol", IEEE_T_EXCOL },
2870 { "IEEE_tx_macerr", IEEE_T_MACERR },
2871 { "IEEE_tx_cserr", IEEE_T_CSERR },
2872 { "IEEE_tx_sqe", IEEE_T_SQE },
2873 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2874 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2877 { "rx_packets", RMON_R_PACKETS },
2878 { "rx_broadcast", RMON_R_BC_PKT },
2879 { "rx_multicast", RMON_R_MC_PKT },
2880 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2881 { "rx_undersize", RMON_R_UNDERSIZE },
2882 { "rx_oversize", RMON_R_OVERSIZE },
2883 { "rx_fragment", RMON_R_FRAG },
2884 { "rx_jabber", RMON_R_JAB },
2885 { "rx_64byte", RMON_R_P64 },
2886 { "rx_65to127byte", RMON_R_P65TO127 },
2887 { "rx_128to255byte", RMON_R_P128TO255 },
2888 { "rx_256to511byte", RMON_R_P256TO511 },
2889 { "rx_512to1023byte", RMON_R_P512TO1023 },
2890 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2891 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2892 { "rx_octets", RMON_R_OCTETS },
2895 { "IEEE_rx_drop", IEEE_R_DROP },
2896 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2897 { "IEEE_rx_crc", IEEE_R_CRC },
2898 { "IEEE_rx_align", IEEE_R_ALIGN },
2899 { "IEEE_rx_macerr", IEEE_R_MACERR },
2900 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2901 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2904 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2906 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2907 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
2908 "rx_xdp_pass", /* RX_XDP_PASS, */
2909 "rx_xdp_drop", /* RX_XDP_DROP, */
2910 "rx_xdp_tx", /* RX_XDP_TX, */
2911 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
2912 "tx_xdp_xmit", /* TX_XDP_XMIT, */
2913 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
2916 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2918 struct fec_enet_private *fep = netdev_priv(dev);
2921 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2922 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2925 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2927 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2928 struct fec_enet_priv_rx_q *rxq;
2931 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2932 rxq = fep->rx_queue[i];
2934 for (j = 0; j < XDP_STATS_TOTAL; j++)
2935 xdp_stats[j] += rxq->stats[j];
2938 memcpy(data, xdp_stats, sizeof(xdp_stats));
2941 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2943 #ifdef CONFIG_PAGE_POOL_STATS
2944 struct page_pool_stats stats = {};
2945 struct fec_enet_priv_rx_q *rxq;
2948 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2949 rxq = fep->rx_queue[i];
2951 if (!rxq->page_pool)
2954 page_pool_get_stats(rxq->page_pool, &stats);
2957 page_pool_ethtool_stats_get(data, &stats);
2961 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2962 struct ethtool_stats *stats, u64 *data)
2964 struct fec_enet_private *fep = netdev_priv(dev);
2966 if (netif_running(dev))
2967 fec_enet_update_ethtool_stats(dev);
2969 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2970 data += FEC_STATS_SIZE / sizeof(u64);
2972 fec_enet_get_xdp_stats(fep, data);
2973 data += XDP_STATS_TOTAL;
2975 fec_enet_page_pool_stats(fep, data);
2978 static void fec_enet_get_strings(struct net_device *netdev,
2979 u32 stringset, u8 *data)
2982 switch (stringset) {
2984 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
2985 ethtool_puts(&data, fec_stats[i].name);
2987 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
2988 ethtool_puts(&data, fec_xdp_stat_strs[i]);
2990 page_pool_ethtool_stats_get_strings(data);
2994 net_selftest_get_strings(data);
2999 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
3005 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
3006 count += page_pool_ethtool_stats_get_count();
3010 return net_selftest_get_count();
3016 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
3018 struct fec_enet_private *fep = netdev_priv(dev);
3019 struct fec_enet_priv_rx_q *rxq;
3022 /* Disable MIB statistics counters */
3023 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
3025 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3026 writel(0, fep->hwp + fec_stats[i].offset);
3028 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3029 rxq = fep->rx_queue[i];
3030 for (j = 0; j < XDP_STATS_TOTAL; j++)
3034 /* Don't disable MIB statistics counters */
3035 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
3038 #else /* !defined(CONFIG_M5272) */
3039 #define FEC_STATS_SIZE 0
3040 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
3044 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3047 #endif /* !defined(CONFIG_M5272) */
3049 /* The ITR clock source is the enet system clock (clk_ahb).
3050 * One ICTT timer tick is 64 clock cycles, i.e. cycle_ns * 64 ns,
3051 * so the ICTT value = X us / (cycle_ns * 64)
3053 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3055 struct fec_enet_private *fep = netdev_priv(ndev);
3057 return us * (fep->itr_clk_rate / 64000) / 1000;
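/* Worked example, assuming a hypothetical 132 MHz AHB clock: one ITR tick
 * is 64 cycles = 64 / 132000000 s ~= 0.485 us, so the conversion above
 * turns 1000 us into
 *
 *   1000 * (132000000 / 64000) / 1000 = 1000 * 2062 / 1000 = 2062 ticks
 *
 * i.e. ~999.7 us of coalescing delay after integer rounding.
 */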
3060 /* Set threshold for interrupt coalescing */
3061 static void fec_enet_itr_coal_set(struct net_device *ndev)
3063 struct fec_enet_private *fep = netdev_priv(ndev);
3066 /* Must be greater than zero to avoid unpredictable behavior */
3067 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3068 !fep->tx_time_itr || !fep->tx_pkts_itr)
3071 /* Select the enet system clock as the interrupt coalescing
3072 * timer clock source
3074 rx_itr = FEC_ITR_CLK_SEL;
3075 tx_itr = FEC_ITR_CLK_SEL;
3077 /* set ICFT and ICTT */
3078 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3079 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3080 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3081 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3083 rx_itr |= FEC_ITR_EN;
3084 tx_itr |= FEC_ITR_EN;
3086 writel(tx_itr, fep->hwp + FEC_TXIC0);
3087 writel(rx_itr, fep->hwp + FEC_RXIC0);
3088 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3089 writel(tx_itr, fep->hwp + FEC_TXIC1);
3090 writel(rx_itr, fep->hwp + FEC_RXIC1);
3091 writel(tx_itr, fep->hwp + FEC_TXIC2);
3092 writel(rx_itr, fep->hwp + FEC_RXIC2);
3096 static int fec_enet_get_coalesce(struct net_device *ndev,
3097 struct ethtool_coalesce *ec,
3098 struct kernel_ethtool_coalesce *kernel_coal,
3099 struct netlink_ext_ack *extack)
3101 struct fec_enet_private *fep = netdev_priv(ndev);
3103 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3106 ec->rx_coalesce_usecs = fep->rx_time_itr;
3107 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3109 ec->tx_coalesce_usecs = fep->tx_time_itr;
3110 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3115 static int fec_enet_set_coalesce(struct net_device *ndev,
3116 struct ethtool_coalesce *ec,
3117 struct kernel_ethtool_coalesce *kernel_coal,
3118 struct netlink_ext_ack *extack)
3120 struct fec_enet_private *fep = netdev_priv(ndev);
3121 struct device *dev = &fep->pdev->dev;
3124 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3127 if (ec->rx_max_coalesced_frames > 255) {
3128 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3132 if (ec->tx_max_coalesced_frames > 255) {
3133 dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
3137 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3138 if (cycle > 0xFFFF) {
3139 dev_err(dev, "Rx coalesced usecs exceed hardware limitation\n");
3143 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3144 if (cycle > 0xFFFF) {
3145 dev_err(dev, "Tx coalesced usecs exceed hardware limitation\n");
3149 fep->rx_time_itr = ec->rx_coalesce_usecs;
3150 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3152 fep->tx_time_itr = ec->tx_coalesce_usecs;
3153 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
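	/* The 0xFFFF checks above bound ICTT to 16 bits; with the
	 * hypothetical 132 MHz AHB clock from the conversion example above,
	 * that caps the usable range at roughly 0xFFFF * 1000 / 2062
	 * ~= 31783 us, i.e. a little under 32 ms of coalescing delay.
	 */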
3155 fec_enet_itr_coal_set(ndev);
3161 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3163 struct fec_enet_private *fep = netdev_priv(ndev);
3164 struct ethtool_keee *p = &fep->eee;
3166 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3169 if (!netif_running(ndev))
3172 edata->tx_lpi_timer = p->tx_lpi_timer;
3174 return phy_ethtool_get_eee(ndev->phydev, edata);
3178 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
3180 struct fec_enet_private *fep = netdev_priv(ndev);
3181 struct ethtool_keee *p = &fep->eee;
3183 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3186 if (!netif_running(ndev))
3189 p->tx_lpi_timer = edata->tx_lpi_timer;
3191 return phy_ethtool_set_eee(ndev->phydev, edata);
3195 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3197 struct fec_enet_private *fep = netdev_priv(ndev);
3199 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3200 wol->supported = WAKE_MAGIC;
3201 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3203 wol->supported = wol->wolopts = 0;
3208 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3210 struct fec_enet_private *fep = netdev_priv(ndev);
3212 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3215 if (wol->wolopts & ~WAKE_MAGIC)
3218 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3219 if (device_may_wakeup(&ndev->dev))
3220 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3222 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3227 static const struct ethtool_ops fec_enet_ethtool_ops = {
3228 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3229 ETHTOOL_COALESCE_MAX_FRAMES,
3230 .get_drvinfo = fec_enet_get_drvinfo,
3231 .get_regs_len = fec_enet_get_regs_len,
3232 .get_regs = fec_enet_get_regs,
3233 .nway_reset = phy_ethtool_nway_reset,
3234 .get_link = ethtool_op_get_link,
3235 .get_coalesce = fec_enet_get_coalesce,
3236 .set_coalesce = fec_enet_set_coalesce,
3237 #ifndef CONFIG_M5272
3238 .get_pauseparam = fec_enet_get_pauseparam,
3239 .set_pauseparam = fec_enet_set_pauseparam,
3240 .get_strings = fec_enet_get_strings,
3241 .get_ethtool_stats = fec_enet_get_ethtool_stats,
3242 .get_sset_count = fec_enet_get_sset_count,
3244 .get_ts_info = fec_enet_get_ts_info,
3245 .get_wol = fec_enet_get_wol,
3246 .set_wol = fec_enet_set_wol,
3247 .get_eee = fec_enet_get_eee,
3248 .set_eee = fec_enet_set_eee,
3249 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3250 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3251 .self_test = net_selftest,
3254 static void fec_enet_free_buffers(struct net_device *ndev)
3256 struct fec_enet_private *fep = netdev_priv(ndev);
3258 struct fec_enet_priv_tx_q *txq;
3259 struct fec_enet_priv_rx_q *rxq;
3262 for (q = 0; q < fep->num_rx_queues; q++) {
3263 rxq = fep->rx_queue[q];
3264 for (i = 0; i < rxq->bd.ring_size; i++)
3265 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3267 for (i = 0; i < XDP_STATS_TOTAL; i++)
3270 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3271 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3272 page_pool_destroy(rxq->page_pool);
3273 rxq->page_pool = NULL;
3276 for (q = 0; q < fep->num_tx_queues; q++) {
3277 txq = fep->tx_queue[q];
3278 for (i = 0; i < txq->bd.ring_size; i++) {
3279 kfree(txq->tx_bounce[i]);
3280 txq->tx_bounce[i] = NULL;
3282 if (!txq->tx_buf[i].buf_p) {
3283 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3287 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3288 dev_kfree_skb(txq->tx_buf[i].buf_p);
3289 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3290 xdp_return_frame(txq->tx_buf[i].buf_p);
3292 struct page *page = txq->tx_buf[i].buf_p;
3294 page_pool_put_page(page->pp, page, 0, false);
3297 txq->tx_buf[i].buf_p = NULL;
3298 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3303 static void fec_enet_free_queue(struct net_device *ndev)
3305 struct fec_enet_private *fep = netdev_priv(ndev);
3307 struct fec_enet_priv_tx_q *txq;
3309 for (i = 0; i < fep->num_tx_queues; i++)
3310 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3311 txq = fep->tx_queue[i];
3312 fec_dma_free(&fep->pdev->dev,
3313 txq->bd.ring_size * TSO_HEADER_SIZE,
3314 txq->tso_hdrs, txq->tso_hdrs_dma);
3317 for (i = 0; i < fep->num_rx_queues; i++)
3318 kfree(fep->rx_queue[i]);
3319 for (i = 0; i < fep->num_tx_queues; i++)
3320 kfree(fep->tx_queue[i]);
3323 static int fec_enet_alloc_queue(struct net_device *ndev)
3325 struct fec_enet_private *fep = netdev_priv(ndev);
3328 struct fec_enet_priv_tx_q *txq;
3330 for (i = 0; i < fep->num_tx_queues; i++) {
3331 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3337 fep->tx_queue[i] = txq;
3338 txq->bd.ring_size = TX_RING_SIZE;
3339 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3341 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3342 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3344 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3345 txq->bd.ring_size * TSO_HEADER_SIZE,
3346 &txq->tso_hdrs_dma, GFP_KERNEL);
3347 if (!txq->tso_hdrs) {
3353 for (i = 0; i < fep->num_rx_queues; i++) {
3354 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3356 if (!fep->rx_queue[i]) {
3361 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3362 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3367 fec_enet_free_queue(ndev);
3372 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3374 struct fec_enet_private *fep = netdev_priv(ndev);
3375 struct fec_enet_priv_rx_q *rxq;
3376 dma_addr_t phys_addr;
3377 struct bufdesc *bdp;
3381 rxq = fep->rx_queue[queue];
3384 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3386 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3390 for (i = 0; i < rxq->bd.ring_size; i++) {
3391 page = page_pool_dev_alloc_pages(rxq->page_pool);
3395 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3396 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3398 rxq->rx_skb_info[i].page = page;
3399 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3400 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3402 if (fep->bufdesc_ex) {
3403 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3404 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3407 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3410 /* Set the last buffer to wrap. */
3411 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3412 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
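	/* Sketch of the resulting layout for a hypothetical 4-entry ring:
	 *
	 *   bd[0] EMPTY -> bd[1] EMPTY -> bd[2] EMPTY -> bd[3] EMPTY|WRAP
	 *     ^                                                   |
	 *     +---------------------------------------------------+
	 *
	 * BD_SC_WRAP on the last descriptor makes the DMA engine jump back
	 * to the ring base instead of walking past the end.
	 */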
3416 fec_enet_free_buffers(ndev);
3421 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3423 struct fec_enet_private *fep = netdev_priv(ndev);
3425 struct bufdesc *bdp;
3426 struct fec_enet_priv_tx_q *txq;
3428 txq = fep->tx_queue[queue];
3430 for (i = 0; i < txq->bd.ring_size; i++) {
3431 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3432 if (!txq->tx_bounce[i])
3435 bdp->cbd_sc = cpu_to_fec16(0);
3436 bdp->cbd_bufaddr = cpu_to_fec32(0);
3438 if (fep->bufdesc_ex) {
3439 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3440 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3443 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3446 /* Set the last buffer to wrap. */
3447 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3448 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3453 fec_enet_free_buffers(ndev);
3457 static int fec_enet_alloc_buffers(struct net_device *ndev)
3459 struct fec_enet_private *fep = netdev_priv(ndev);
3462 for (i = 0; i < fep->num_rx_queues; i++)
3463 if (fec_enet_alloc_rxq_buffers(ndev, i))
3466 for (i = 0; i < fep->num_tx_queues; i++)
3467 if (fec_enet_alloc_txq_buffers(ndev, i))
3473 fec_enet_open(struct net_device *ndev)
3475 struct fec_enet_private *fep = netdev_priv(ndev);
3479 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3483 pinctrl_pm_select_default_state(&fep->pdev->dev);
3484 ret = fec_enet_clk_enable(ndev, true);
3488 /* During the first fec_enet_open() call the PHY is not yet probed,
3489 * so the phy_reset_after_clk_enable() call within
3490 * fec_enet_clk_enable() fails. As we need this reset to be sure the
3491 * PHY is working correctly, we check whether we need to reset again
3492 * later, once the PHY has been probed.
3494 if (ndev->phydev && ndev->phydev->drv)
3495 reset_again = false;
3499 /* I should reset the ring buffers here, but I don't yet know
3500 * a simple way to do that.
3503 ret = fec_enet_alloc_buffers(ndev);
3505 goto err_enet_alloc;
3507 /* Init MAC prior to mii bus probe */
3510 /* Call phy_reset_after_clk_enable() again if the earlier call in
3511 * fec_enet_clk_enable() failed because the PHY wasn't probed yet.
3514 fec_enet_phy_reset_after_clk_enable(ndev);
3516 /* Probe and connect to the PHY when opening the interface */
3517 ret = fec_enet_mii_probe(ndev);
3519 goto err_enet_mii_probe;
3521 if (fep->quirks & FEC_QUIRK_ERR006687)
3522 imx6q_cpuidle_fec_irqs_used();
3524 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3525 cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3527 napi_enable(&fep->napi);
3528 phy_start(ndev->phydev);
3529 netif_tx_start_all_queues(ndev);
3531 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3532 FEC_WOL_FLAG_ENABLE);
3537 fec_enet_free_buffers(ndev);
3539 fec_enet_clk_enable(ndev, false);
3541 pm_runtime_mark_last_busy(&fep->pdev->dev);
3542 pm_runtime_put_autosuspend(&fep->pdev->dev);
3543 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3548 fec_enet_close(struct net_device *ndev)
3550 struct fec_enet_private *fep = netdev_priv(ndev);
3552 phy_stop(ndev->phydev);
3554 if (netif_device_present(ndev)) {
3555 napi_disable(&fep->napi);
3556 netif_tx_disable(ndev);
3560 phy_disconnect(ndev->phydev);
3562 if (fep->quirks & FEC_QUIRK_ERR006687)
3563 imx6q_cpuidle_fec_irqs_unused();
3565 fec_enet_update_ethtool_stats(ndev);
3567 fec_enet_clk_enable(ndev, false);
3568 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3569 cpu_latency_qos_remove_request(&fep->pm_qos_req);
3571 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3572 pm_runtime_mark_last_busy(&fep->pdev->dev);
3573 pm_runtime_put_autosuspend(&fep->pdev->dev);
3575 fec_enet_free_buffers(ndev);
3580 /* Set or clear the multicast filter for this adaptor.
3581 * Skeleton taken from sunlance driver.
3582 * The CPM Ethernet implementation allows Multicast as well as individual
3583 * MAC address filtering. Some of the drivers check to make sure it is
3584 * a group multicast address, and discard those that are not. I guess I
3585 * will do the same for now, but just remove the test if you want
3586 * individual filtering as well (do the upper net layers want or support
3587 * this kind of feature?).
3590 #define FEC_HASH_BITS 6 /* #bits in hash */
3592 static void set_multicast_list(struct net_device *ndev)
3594 struct fec_enet_private *fep = netdev_priv(ndev);
3595 struct netdev_hw_addr *ha;
3596 unsigned int crc, tmp;
3598 unsigned int hash_high = 0, hash_low = 0;
3600 if (ndev->flags & IFF_PROMISC) {
3601 tmp = readl(fep->hwp + FEC_R_CNTRL);
3603 writel(tmp, fep->hwp + FEC_R_CNTRL);
3607 tmp = readl(fep->hwp + FEC_R_CNTRL);
3609 writel(tmp, fep->hwp + FEC_R_CNTRL);
3611 if (ndev->flags & IFF_ALLMULTI) {
3612 /* Catch all multicast addresses, so set the filter to all 1's */
3615 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3616 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3621 /* Add the addresses to the hash registers */
3622 netdev_for_each_mc_addr(ha, ndev) {
3623 /* calculate crc32 value of mac address */
3624 crc = ether_crc_le(ndev->addr_len, ha->addr);
3626 /* only the upper 6 bits (FEC_HASH_BITS) are used,
3627 * and they select a specific bit in the hash registers
3629 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3632 hash_high |= 1 << (hash - 32);
3634 hash_low |= 1 << hash;
3637 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3638 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
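	/* Worked example with an illustrative CRC value: if ether_crc_le()
	 * returns 0xb6000000, the top FEC_HASH_BITS are
	 * 0xb6000000 >> 26 = 45; since 45 > 31, bit 45 - 32 = 13 of
	 * GRP_HASH_TABLE_HIGH gets set for that address.
	 */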
3641 /* Set a MAC change in hardware. */
3643 fec_set_mac_address(struct net_device *ndev, void *p)
3645 struct fec_enet_private *fep = netdev_priv(ndev);
3646 struct sockaddr *addr = p;
3649 if (!is_valid_ether_addr(addr->sa_data))
3650 return -EADDRNOTAVAIL;
3651 eth_hw_addr_set(ndev, addr->sa_data);
3654 /* Check the netif status here to avoid a system hang in this case:
3655 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3656 * After ethx is down, all FEC clocks are gated off, so a register
3657 * access would hang the system.
3659 if (!netif_running(ndev))
3662 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3663 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3664 fep->hwp + FEC_ADDR_LOW);
3665 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3666 fep->hwp + FEC_ADDR_HIGH);
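	/* Worked example for a hypothetical MAC 00:04:9f:01:02:03: the
	 * writes above set FEC_ADDR_LOW = 0x00049f01 (bytes 0-3, byte 0 in
	 * the top octet) and FEC_ADDR_HIGH = 0x02030000 (bytes 4-5).
	 */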
3670 #ifdef CONFIG_NET_POLL_CONTROLLER
3672 * fec_poll_controller - FEC Poll controller function
3673 * @dev: The FEC network adapter
3675 * Polled functionality used by netconsole and others in non-interrupt mode
3678 static void fec_poll_controller(struct net_device *dev)
3681 struct fec_enet_private *fep = netdev_priv(dev);
3683 for (i = 0; i < FEC_IRQ_NUM; i++) {
3684 if (fep->irq[i] > 0) {
3685 disable_irq(fep->irq[i]);
3686 fec_enet_interrupt(fep->irq[i], dev);
3687 enable_irq(fep->irq[i]);
3693 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3694 netdev_features_t features)
3696 struct fec_enet_private *fep = netdev_priv(netdev);
3697 netdev_features_t changed = features ^ netdev->features;
3699 netdev->features = features;
3701 /* Receive checksum has been changed */
3702 if (changed & NETIF_F_RXCSUM) {
3703 if (features & NETIF_F_RXCSUM)
3704 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3706 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3710 static int fec_set_features(struct net_device *netdev,
3711 netdev_features_t features)
3713 struct fec_enet_private *fep = netdev_priv(netdev);
3714 netdev_features_t changed = features ^ netdev->features;
3716 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3717 napi_disable(&fep->napi);
3718 netif_tx_lock_bh(netdev);
3720 fec_enet_set_netdev_features(netdev, features);
3721 fec_restart(netdev);
3722 netif_tx_wake_all_queues(netdev);
3723 netif_tx_unlock_bh(netdev);
3724 napi_enable(&fep->napi);
3726 fec_enet_set_netdev_features(netdev, features);
3732 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3733 struct net_device *sb_dev)
3735 struct fec_enet_private *fep = netdev_priv(ndev);
3738 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3739 return netdev_pick_tx(ndev, skb, NULL);
3741 /* VLAN is present in the payload. */
3742 if (eth_type_vlan(skb->protocol)) {
3743 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3745 vlan_tag = ntohs(vhdr->h_vlan_TCI);
3746 /* VLAN is present in the skb but not yet pushed into the payload. */
3747 } else if (skb_vlan_tag_present(skb)) {
3748 vlan_tag = skb->vlan_tci;
3753 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
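	/* Worked example: a hypothetical frame with VLAN TCI 0x6005 has
	 * PCP = 0x6005 >> 13 = 3, which fec_enet_vlan_pri_to_queue maps to
	 * queue 1 (PCP 0-1 -> queue 0, 2-4 -> queue 1, 5-7 -> queue 2).
	 */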
3756 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3758 struct fec_enet_private *fep = netdev_priv(dev);
3759 bool is_run = netif_running(dev);
3760 struct bpf_prog *old_prog;
3762 switch (bpf->command) {
3763 case XDP_SETUP_PROG:
3764 /* No need to support the SoCs that require frame
3765 * swapping: XDP performance there would be no better
3766 * than skb mode.
3768 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3772 xdp_features_clear_redirect_target(dev);
3775 napi_disable(&fep->napi);
3776 netif_tx_disable(dev);
3779 old_prog = xchg(&fep->xdp_prog, bpf->prog);
3781 bpf_prog_put(old_prog);
3786 napi_enable(&fep->napi);
3787 netif_tx_start_all_queues(dev);
3791 xdp_features_set_redirect_target(dev, false);
3795 case XDP_SETUP_XSK_POOL:
3804 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3806 if (unlikely(index < 0))
3809 return (index % fep->num_tx_queues);
3812 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3813 struct fec_enet_priv_tx_q *txq,
3814 void *frame, u32 dma_sync_len,
3817 unsigned int index, status, estatus;
3818 struct bufdesc *bdp;
3819 dma_addr_t dma_addr;
3823 entries_free = fec_enet_get_free_txdesc_num(txq);
3824 if (entries_free < MAX_SKB_FRAGS + 1) {
3825 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3829 /* Fill in a Tx ring entry */
3831 status = fec16_to_cpu(bdp->cbd_sc);
3832 status &= ~BD_ENET_TX_STATS;
3834 index = fec_enet_get_bd_index(bdp, &txq->bd);
3837 struct xdp_frame *xdpf = frame;
3839 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3840 xdpf->len, DMA_TO_DEVICE);
3841 if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3844 frame_len = xdpf->len;
3845 txq->tx_buf[index].buf_p = xdpf;
3846 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3848 struct xdp_buff *xdpb = frame;
3851 page = virt_to_page(xdpb->data);
3852 dma_addr = page_pool_get_dma_addr(page) +
3853 (xdpb->data - xdpb->data_hard_start);
3854 dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3855 dma_sync_len, DMA_BIDIRECTIONAL);
3856 frame_len = xdpb->data_end - xdpb->data;
3857 txq->tx_buf[index].buf_p = page;
3858 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3861 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3862 if (fep->bufdesc_ex)
3863 estatus = BD_ENET_TX_INT;
3865 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3866 bdp->cbd_datlen = cpu_to_fec16(frame_len);
3868 if (fep->bufdesc_ex) {
3869 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3871 if (fep->quirks & FEC_QUIRK_HAS_AVB)
3872 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3875 ebdp->cbd_esc = cpu_to_fec32(estatus);
3878 /* Make sure the updates to the rest of the descriptor are performed
3879 * before transferring ownership.
3883 /* Send it on its way. Tell FEC it's ready, interrupt when done,
3884 * it's the last BD of the frame, and to put the CRC on the end.
3886 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3887 bdp->cbd_sc = cpu_to_fec16(status);
3889 /* If this was the last BD in the ring, start at the beginning again. */
3890 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3892 /* Make sure the updates to bdp are performed before txq->bd.cur. */
3897 /* Trigger transmission start */
3898 writel(0, txq->bd.reg_desc_active);
3903 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3904 int cpu, struct xdp_buff *xdp,
3907 struct fec_enet_priv_tx_q *txq;
3908 struct netdev_queue *nq;
3911 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3912 txq = fep->tx_queue[queue];
3913 nq = netdev_get_tx_queue(fep->netdev, queue);
3915 __netif_tx_lock(nq, cpu);
3917 /* Avoid tx timeout as XDP shares the queue with the kernel stack */
3918 txq_trans_cond_update(nq);
3919 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3921 __netif_tx_unlock(nq);
3926 static int fec_enet_xdp_xmit(struct net_device *dev,
3928 struct xdp_frame **frames,
3931 struct fec_enet_private *fep = netdev_priv(dev);
3932 struct fec_enet_priv_tx_q *txq;
3933 int cpu = smp_processor_id();
3934 unsigned int sent_frames = 0;
3935 struct netdev_queue *nq;
3939 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3940 txq = fep->tx_queue[queue];
3941 nq = netdev_get_tx_queue(fep->netdev, queue);
3943 __netif_tx_lock(nq, cpu);
3945 /* Avoid tx timeout as XDP shares the queue with the kernel stack */
3946 txq_trans_cond_update(nq);
3947 for (i = 0; i < num_frames; i++) {
3948 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3953 __netif_tx_unlock(nq);
3958 static int fec_hwtstamp_get(struct net_device *ndev,
3959 struct kernel_hwtstamp_config *config)
3961 struct fec_enet_private *fep = netdev_priv(ndev);
3963 if (!netif_running(ndev))
3966 if (!fep->bufdesc_ex)
3969 fec_ptp_get(ndev, config);
3974 static int fec_hwtstamp_set(struct net_device *ndev,
3975 struct kernel_hwtstamp_config *config,
3976 struct netlink_ext_ack *extack)
3978 struct fec_enet_private *fep = netdev_priv(ndev);
3980 if (!netif_running(ndev))
3983 if (!fep->bufdesc_ex)
3986 return fec_ptp_set(ndev, config, extack);
3989 static const struct net_device_ops fec_netdev_ops = {
3990 .ndo_open = fec_enet_open,
3991 .ndo_stop = fec_enet_close,
3992 .ndo_start_xmit = fec_enet_start_xmit,
3993 .ndo_select_queue = fec_enet_select_queue,
3994 .ndo_set_rx_mode = set_multicast_list,
3995 .ndo_validate_addr = eth_validate_addr,
3996 .ndo_tx_timeout = fec_timeout,
3997 .ndo_set_mac_address = fec_set_mac_address,
3998 .ndo_eth_ioctl = phy_do_ioctl_running,
3999 #ifdef CONFIG_NET_POLL_CONTROLLER
4000 .ndo_poll_controller = fec_poll_controller,
4002 .ndo_set_features = fec_set_features,
4003 .ndo_bpf = fec_enet_bpf,
4004 .ndo_xdp_xmit = fec_enet_xdp_xmit,
4005 .ndo_hwtstamp_get = fec_hwtstamp_get,
4006 .ndo_hwtstamp_set = fec_hwtstamp_set,
4009 static const unsigned short offset_des_active_rxq[] = {
4010 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
4013 static const unsigned short offset_des_active_txq[] = {
4014 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
4018 * XXX: We need to clean up on failure exits here.
4021 static int fec_enet_init(struct net_device *ndev)
4023 struct fec_enet_private *fep = netdev_priv(ndev);
4024 struct bufdesc *cbd_base;
4028 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4029 sizeof(struct bufdesc);
4030 unsigned dsize_log2 = __fls(dsize);
4033 WARN_ON(dsize != (1 << dsize_log2));
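	/* For reference: the legacy struct bufdesc is 8 bytes and the
	 * extended struct bufdesc_ex is 32 bytes, so dsize_log2 is 3 or 5
	 * and descriptor index math can use shifts instead of divisions,
	 * e.g. index = ((void *)bdp - (void *)base) >> dsize_log2.
	 */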
4034 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4035 fep->rx_align = 0xf;
4036 fep->tx_align = 0xf;
4038 fep->rx_align = 0x3;
4039 fep->tx_align = 0x3;
4041 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4042 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4043 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4044 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4046 /* Check mask of the streaming and coherent API */
4047 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4049 dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4053 ret = fec_enet_alloc_queue(ndev);
4057 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4059 /* Allocate memory for buffer descriptors. */
4060 cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4064 goto free_queue_mem;
4067 /* Get the Ethernet address */
4068 ret = fec_get_mac(ndev);
4070 goto free_queue_mem;
4072 /* Set receive and transmit descriptor base. */
4073 for (i = 0; i < fep->num_rx_queues; i++) {
4074 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4075 unsigned size = dsize * rxq->bd.ring_size;
4078 rxq->bd.base = cbd_base;
4079 rxq->bd.cur = cbd_base;
4080 rxq->bd.dma = bd_dma;
4081 rxq->bd.dsize = dsize;
4082 rxq->bd.dsize_log2 = dsize_log2;
4083 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4085 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4086 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4089 for (i = 0; i < fep->num_tx_queues; i++) {
4090 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4091 unsigned size = dsize * txq->bd.ring_size;
4094 txq->bd.base = cbd_base;
4095 txq->bd.cur = cbd_base;
4096 txq->bd.dma = bd_dma;
4097 txq->bd.dsize = dsize;
4098 txq->bd.dsize_log2 = dsize_log2;
4099 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4101 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4102 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4106 /* The FEC Ethernet specific entries in the device structure */
4107 ndev->watchdog_timeo = TX_TIMEOUT;
4108 ndev->netdev_ops = &fec_netdev_ops;
4109 ndev->ethtool_ops = &fec_enet_ethtool_ops;
4111 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4112 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4114 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4115 /* enable hw VLAN support */
4116 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4118 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4119 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4121 /* enable hw accelerator */
4122 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4123 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4124 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4127 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4129 fep->rx_align = 0x3f;
4132 ndev->hw_features = ndev->features;
4134 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4135 ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4136 NETDEV_XDP_ACT_REDIRECT;
4140 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4141 fec_enet_clear_ethtool_stats(ndev);
4143 fec_enet_update_ethtool_stats(ndev);
4148 fec_enet_free_queue(ndev);
4153 static int fec_reset_phy(struct platform_device *pdev)
4155 struct gpio_desc *phy_reset;
4156 int msec = 1, phy_post_delay = 0;
4157 struct device_node *np = pdev->dev.of_node;
4163 err = of_property_read_u32(np, "phy-reset-duration", &msec);
4164 /* A sane reset duration should not be longer than 1s */
4165 if (!err && msec > 1000)
4168 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4169 /* A valid post-reset delay should be less than 1s */
4170 if (!err && phy_post_delay > 1000)
4173 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4175 if (IS_ERR(phy_reset))
4176 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4177 "failed to get phy-reset-gpios\n");
4185 usleep_range(msec * 1000, msec * 1000 + 1000);
4187 gpiod_set_value_cansleep(phy_reset, 0);
4189 if (!phy_post_delay)
4192 if (phy_post_delay > 20)
4193 msleep(phy_post_delay);
4195 usleep_range(phy_post_delay * 1000,
4196 phy_post_delay * 1000 + 1000);
4200 #else /* CONFIG_OF */
4201 static int fec_reset_phy(struct platform_device *pdev)
4204 * In case of platform probe, the reset has already been done by machine code.
4209 #endif /* CONFIG_OF */
4212 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4214 struct device_node *np = pdev->dev.of_node;
4216 *num_tx = *num_rx = 1;
4218 if (!np || !of_device_is_available(np))
4221 /* parse the num of tx and rx queues */
4222 of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4224 of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4226 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4227 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4233 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4234 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4242 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4244 int irq_cnt = platform_irq_count(pdev);
4246 if (irq_cnt > FEC_IRQ_NUM)
4247 irq_cnt = FEC_IRQ_NUM; /* last for pps */
4248 else if (irq_cnt == 2)
4249 irq_cnt = 1; /* last for pps */
4250 else if (irq_cnt <= 0)
4251 irq_cnt = 1; /* At least 1 irq is needed */
4255 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4257 struct net_device *ndev = platform_get_drvdata(pdev);
4258 struct fec_enet_private *fep = netdev_priv(ndev);
4260 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4261 fep->wake_irq = fep->irq[2];
4263 fep->wake_irq = fep->irq[0];
4266 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4267 struct device_node *np)
4269 struct device_node *gpr_np;
4273 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4277 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4278 ARRAY_SIZE(out_val));
4280 dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4284 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4285 if (IS_ERR(fep->stop_gpr.gpr)) {
4286 dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4287 ret = PTR_ERR(fep->stop_gpr.gpr);
4288 fep->stop_gpr.gpr = NULL;
4292 fep->stop_gpr.reg = out_val[1];
4293 fep->stop_gpr.bit = out_val[2];
4296 of_node_put(gpr_np);
4302 fec_probe(struct platform_device *pdev)
4304 struct fec_enet_private *fep;
4305 struct fec_platform_data *pdata;
4306 phy_interface_t interface;
4307 struct net_device *ndev;
4308 int i, irq, ret = 0;
4310 struct device_node *np = pdev->dev.of_node, *phy_node;
4315 const struct fec_devinfo *dev_info;
4317 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4319 /* Init network device */
4320 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4321 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4325 SET_NETDEV_DEV(ndev, &pdev->dev);
4327 /* setup board info structure */
4328 fep = netdev_priv(ndev);
4330 dev_info = device_get_match_data(&pdev->dev);
4332 dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4334 fep->quirks = dev_info->quirks;
4337 fep->num_rx_queues = num_rx_qs;
4338 fep->num_tx_queues = num_tx_qs;
4340 #if !defined(CONFIG_M5272)
4341 /* enable pause frame autonegotiation by default */
4342 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4343 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4346 /* Select default pin state */
4347 pinctrl_pm_select_default_state(&pdev->dev);
4349 fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4350 if (IS_ERR(fep->hwp)) {
4351 ret = PTR_ERR(fep->hwp);
4352 goto failed_ioremap;
4356 fep->dev_id = dev_id++;
4358 platform_set_drvdata(pdev, ndev);
4360 if ((of_machine_is_compatible("fsl,imx6q") ||
4361 of_machine_is_compatible("fsl,imx6dl")) &&
4362 !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4363 fep->quirks |= FEC_QUIRK_ERR006687;
4365 ret = fec_enet_ipc_handle_init(fep);
4367 goto failed_ipc_init;
4369 if (of_property_read_bool(np, "fsl,magic-packet"))
4370 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4372 ret = fec_enet_init_stop_mode(fep, np);
4374 goto failed_stop_mode;
4376 phy_node = of_parse_phandle(np, "phy-handle", 0);
4377 if (!phy_node && of_phy_is_fixed_link(np)) {
4378 ret = of_phy_register_fixed_link(np);
4381 "broken fixed-link specification\n");
4384 phy_node = of_node_get(np);
4386 fep->phy_node = phy_node;
4388 ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4390 pdata = dev_get_platdata(&pdev->dev);
4392 fep->phy_interface = pdata->phy;
4394 fep->phy_interface = PHY_INTERFACE_MODE_MII;
4396 fep->phy_interface = interface;
4399 ret = fec_enet_parse_rgmii_delay(fep, np);
4401 goto failed_rgmii_delay;
4403 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4404 if (IS_ERR(fep->clk_ipg)) {
4405 ret = PTR_ERR(fep->clk_ipg);
4409 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4410 if (IS_ERR(fep->clk_ahb)) {
4411 ret = PTR_ERR(fep->clk_ahb);
4415 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4417 /* enet_out is optional, depends on board */
4418 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4419 if (IS_ERR(fep->clk_enet_out)) {
4420 ret = PTR_ERR(fep->clk_enet_out);
4424 fep->ptp_clk_on = false;
4425 mutex_init(&fep->ptp_clk_mutex);
4427 /* clk_ref is optional, depends on board */
4428 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4429 if (IS_ERR(fep->clk_ref)) {
4430 ret = PTR_ERR(fep->clk_ref);
4433 fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4435 /* clk_2x_txclk is optional, depends on board */
4436 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4437 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4438 if (IS_ERR(fep->clk_2x_txclk))
4439 fep->clk_2x_txclk = NULL;
4442 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4443 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4444 if (IS_ERR(fep->clk_ptp)) {
4445 fep->clk_ptp = NULL;
4446 fep->bufdesc_ex = false;
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}
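
	/* Enable runtime PM with autosuspend so the bus clocks only stay on
	 * while they are needed (e.g. for MDIO accesses).
	 */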
	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;
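
	/* Set up PTP (it needs the extended buffer descriptors), then request
	 * every interrupt line the platform provides, falling back from named
	 * to indexed lookup.
	 */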
	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);
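
	/* Register the MDIO bus, then leave the interface quiescent until it
	 * is opened: carrier off, clocks gated, pins in their sleep state.
	 */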
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
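
/* fec_drv_remove() unwinds fec_probe(). The device is resumed first so the
 * clocks are guaranteed to be on while the hardware is torn down.
 */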
static void
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_err(&pdev->dev,
			"Failed to resume device in remove callback (%pe)\n",
			ERR_PTR(ret));

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);

	/* If pm_runtime_get_sync() failed, the clocks are still off, so skip
	 * disabling them again.
	 */
	if (ret >= 0) {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
	}
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(ndev);
}
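
/* System sleep: quiesce the interface and gate the clocks. With Wake-on-LAN
 * enabled, the wake-up interrupt stays armed and the controller is put into
 * its hardware stop mode instead.
 */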
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
			fec_irqs_disable(ndev);
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		} else {
			fec_irqs_disable_except_wakeup(ndev);
			if (fep->wake_irq > 0) {
				disable_irq(fep->wake_irq);
				enable_irq_wake(fep->wake_irq);
			}
			fec_enet_stop_mode(fep, true);
		}
		/* It's safe to disable clocks since interrupts are masked */
		fec_enet_clk_enable(ndev, false);

		fep->rpm_active = !pm_runtime_status_suspended(dev);
		if (fep->rpm_active) {
			ret = pm_runtime_force_suspend(dev);
			if (ret < 0) {
				rtnl_unlock();
				return ret;
			}
		}
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * disabling either one takes the PHY link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
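
/* System resume: the mirror image of fec_suspend(), restoring clocks, stop
 * mode, interrupts and the PHY state before re-attaching the interface.
 */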
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->rpm_active)
			pm_runtime_force_resume(dev);

		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);
			if (fep->wake_irq) {
				disable_irq_wake(fep->wake_irq);
				enable_irq(fep->wake_irq);
			}

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_init_hw(ndev->phydev);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
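
/* Runtime PM only gates the ipg and ahb bus clocks; everything else is
 * handled by the system sleep callbacks above.
 */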
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}

static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove_new = fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");