1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
4 /* TSN endpoint Ethernet MAC driver
6 * The TSN endpoint Ethernet MAC is an FPGA based network device for real-time
7 * communication. It is designed for endpoints within TSN (Time Sensitive
8 * Networking) networks; e.g., for PLCs in the industrial automation case.
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
11 * for all other traffic which is not real-time relevant.
13 * More information can be found here:
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
21 #include <linux/module.h>
23 #include <linux/of_net.h>
24 #include <linux/of_mdio.h>
25 #include <linux/interrupt.h>
26 #include <linux/etherdevice.h>
27 #include <linux/phy.h>
28 #include <linux/iopoll.h>
29 #include <linux/bpf.h>
30 #include <linux/bpf_trace.h>
31 #include <net/page_pool/helpers.h>
32 #include <net/xdp_sock_drv.h>
34 #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
35 #define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
37 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
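/* Every RX buffer occupies a full page: headroom (large enough for both
* NET_SKB_PAD and XDP_PACKET_HEADROOM, plus NET_IP_ALIGN) at the front and
* room for struct skb_shared_info at the end, so received pages can be
* turned into skbs via napi_build_skb() without copying.
*/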
38 /* XSK buffer shall store at least a Q-in-Q frame */
39 #define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
40 ETH_FRAME_LEN + ETH_FCS_LEN + \
41 VLAN_HLEN * 2, 4))
43 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
44 #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
45 #else
46 #define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
47 #endif
48 #define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
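/* Descriptor base addresses are programmed as two 32 bit registers; on
* configurations without 64 bit DMA addressing the high half is simply 0.
*/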
50 #define TSNEP_COALESCE_USECS_DEFAULT 64
51 #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
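/* The maximum corresponds to the largest programmable delay field value times
* the base resolution; the additional ECM_INT_DELAY_BASE_US - 1 covers the
* remainder lost to integer division in tsnep_set_irq_coalesce().
*/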
55 #define TSNEP_TX_TYPE_MAP BIT(0)
56 #define TSNEP_TX_TYPE_MAP_PAGE BIT(1)
57 #define TSNEP_TX_TYPE_INLINE BIT(2)
59 #define TSNEP_TX_TYPE_SKB BIT(8)
60 #define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP)
61 #define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE)
62 #define TSNEP_TX_TYPE_SKB_FRAG BIT(9)
63 #define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE)
64 #define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE)
65 #define TSNEP_TX_TYPE_XDP_TX BIT(10)
66 #define TSNEP_TX_TYPE_XDP_NDO BIT(11)
67 #define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
68 #define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
69 #define TSNEP_TX_TYPE_XSK BIT(12)
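/* entry->type combines how the buffer was attached to the descriptor (mapped
* with dma_map_single(), mapped as page, or inlined into the descriptor) with
* its origin (skb, skb fragment, XDP_TX, XDP ndo_xdp_xmit or XSK), so that
* tsnep_tx_unmap() and tsnep_tx_poll() can handle each case correctly.
*/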
71 #define TSNEP_XDP_TX BIT(0)
72 #define TSNEP_XDP_REDIRECT BIT(1)
74 static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
76 iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
79 static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
81 mask |= ECM_INT_DISABLE;
82 iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
85 static irqreturn_t tsnep_irq(int irq, void *arg)
87 struct tsnep_adapter *adapter = arg;
88 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
90 /* acknowledge interrupt */
92 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
94 /* handle link interrupt */
95 if ((active & ECM_INT_LINK) != 0)
96 phy_mac_interrupt(adapter->netdev->phydev);
98 /* handle TX/RX queue 0 interrupt */
99 if ((active & adapter->queue[0].irq_mask) != 0) {
100 if (napi_schedule_prep(&adapter->queue[0].napi)) {
101 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
102 /* schedule after masking to avoid races */
103 __napi_schedule(&adapter->queue[0].napi);
110 static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
112 struct tsnep_queue *queue = arg;
114 /* handle TX/RX queue interrupt */
115 if (napi_schedule_prep(&queue->napi)) {
116 tsnep_disable_irq(queue->adapter, queue->irq_mask);
117 /* schedule after masking to avoid races */
118 __napi_schedule(&queue->napi);
124 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
126 if (usecs > TSNEP_COALESCE_USECS_MAX)
129 usecs /= ECM_INT_DELAY_BASE_US;
130 usecs <<= ECM_INT_DELAY_SHIFT;
131 usecs &= ECM_INT_DELAY_MASK;
133 queue->irq_delay &= ~ECM_INT_DELAY_MASK;
134 queue->irq_delay |= usecs;
135 iowrite8(queue->irq_delay, queue->irq_delay_addr);
140 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
144 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
145 usecs >>= ECM_INT_DELAY_SHIFT;
146 usecs *= ECM_INT_DELAY_BASE_US;
151 static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
153 struct tsnep_adapter *adapter = bus->priv;
158 if (!adapter->suppress_preamble)
159 md |= ECM_MD_PREAMBLE;
160 md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
161 md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
162 iowrite32(md, adapter->addr + ECM_MD_CONTROL);
163 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
164 !(md & ECM_MD_BUSY), 16, 1000);
168 return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
171 static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
174 struct tsnep_adapter *adapter = bus->priv;
179 if (!adapter->suppress_preamble)
180 md |= ECM_MD_PREAMBLE;
181 md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
182 md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
183 md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
184 iowrite32(md, adapter->addr + ECM_MD_CONTROL);
185 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
186 !(md & ECM_MD_BUSY), 16, 1000);
193 static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
197 switch (adapter->phydev->speed) {
199 mode = ECM_LINK_MODE_100;
202 mode = ECM_LINK_MODE_1000;
205 mode = ECM_LINK_MODE_OFF;
208 iowrite32(mode, adapter->addr + ECM_STATUS);
211 static void tsnep_phy_link_status_change(struct net_device *netdev)
213 struct tsnep_adapter *adapter = netdev_priv(netdev);
214 struct phy_device *phydev = netdev->phydev;
217 tsnep_set_link_mode(adapter);
219 phy_print_status(netdev->phydev);
222 static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
226 retval = phy_loopback(adapter->phydev, enable);
228 /* PHY link state change is not signaled if loopback is enabled; it
229 * would delay a working loopback anyway, so let's ensure that loopback
230 * is working immediately by setting link mode directly
231 */
232 if (!retval && enable)
233 tsnep_set_link_mode(adapter);
238 static int tsnep_phy_open(struct tsnep_adapter *adapter)
240 struct phy_device *phydev;
241 struct ethtool_eee ethtool_eee;
244 retval = phy_connect_direct(adapter->netdev, adapter->phydev,
245 tsnep_phy_link_status_change,
249 phydev = adapter->netdev->phydev;
251 /* MAC supports only 100Mbps|1000Mbps full duplex
252 * SPE (Single Pair Ethernet) is also an option but not implemented yet
254 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
255 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
256 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
257 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
259 /* disable EEE autoneg, EEE not supported by TSNEP */
260 memset(&ethtool_eee, 0, sizeof(ethtool_eee));
261 phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
263 adapter->phydev->irq = PHY_MAC_INTERRUPT;
264 phy_start(adapter->phydev);
269 static void tsnep_phy_close(struct tsnep_adapter *adapter)
271 phy_stop(adapter->netdev->phydev);
272 phy_disconnect(adapter->netdev->phydev);
275 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
277 struct device *dmadev = tx->adapter->dmadev;
280 memset(tx->entry, 0, sizeof(tx->entry));
282 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
284 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
292 static int tsnep_tx_ring_create(struct tsnep_tx *tx)
294 struct device *dmadev = tx->adapter->dmadev;
295 struct tsnep_tx_entry *entry;
296 struct tsnep_tx_entry *next_entry;
300 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
302 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
308 for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
309 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
310 entry->desc_wb = (struct tsnep_tx_desc_wb *)
311 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
312 entry->desc = (struct tsnep_tx_desc *)
313 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
314 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
315 entry->owner_user_flag = false;
318 for (i = 0; i < TSNEP_RING_SIZE; i++) {
319 entry = &tx->entry[i];
320 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
321 entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
327 tsnep_tx_ring_cleanup(tx);
331 static void tsnep_tx_init(struct tsnep_tx *tx)
335 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
336 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
337 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
340 tx->owner_counter = 1;
341 tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
344 static void tsnep_tx_enable(struct tsnep_tx *tx)
346 struct netdev_queue *nq;
348 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
350 __netif_tx_lock_bh(nq);
351 netif_tx_wake_queue(nq);
352 __netif_tx_unlock_bh(nq);
355 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
357 struct netdev_queue *nq;
360 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
362 __netif_tx_lock_bh(nq);
363 netif_tx_stop_queue(nq);
364 __netif_tx_unlock_bh(nq);
366 /* wait until TX is done in hardware */
367 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
368 ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
371 /* wait until TX is also done in software */
372 while (READ_ONCE(tx->read) != tx->write) {
374 napi_synchronize(napi);
378 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
381 struct tsnep_tx_entry *entry = &tx->entry[index];
383 entry->properties = 0;
384 /* xdpf and zc are union with skb */
386 entry->properties = length & TSNEP_DESC_LENGTH_MASK;
387 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
388 if ((entry->type & TSNEP_TX_TYPE_SKB) &&
389 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
390 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
392 /* toggle user flag to prevent false acknowledge
394 * Only the first fragment is acknowledged. For all other
395 * fragments no acknowledge is done and the last written owner
396 * counter stays in the writeback descriptor. Therefore, it is
397 * possible that the last written owner counter is identical to
398 * the new incremented owner counter and a false acknowledge is
399 * detected before the real acknowledge has been done by
402 * The user flag is used to prevent this situation. The user
403 * flag is copied to the writeback descriptor by the hardware
404 * and is used as additional acknowledge data. By toggling the
405 * user flag only for the first fragment (which is
406 * acknowledged), it is guaranteed that the last acknowledge
407 * done for this descriptor has used a different user flag and
408 * cannot be detected as false acknowledge.
410 entry->owner_user_flag = !entry->owner_user_flag;
413 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
414 if (index == tx->increment_owner_counter) {
416 if (tx->owner_counter == 4)
417 tx->owner_counter = 1;
418 tx->increment_owner_counter--;
419 if (tx->increment_owner_counter < 0)
420 tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
423 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
424 TSNEP_DESC_OWNER_COUNTER_MASK;
425 if (entry->owner_user_flag)
426 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
427 entry->desc->more_properties =
428 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
429 if (entry->type & TSNEP_TX_TYPE_INLINE)
430 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG;
432 /* descriptor properties shall be written last, because valid data is
437 entry->desc->properties = __cpu_to_le32(entry->properties);
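/* One descriptor is always kept unused so that a completely filled ring can
* be distinguished from an empty one (read == write); hence the "- 1".
*/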
440 static int tsnep_tx_desc_available(struct tsnep_tx *tx)
442 if (tx->read <= tx->write)
443 return TSNEP_RING_SIZE - tx->write + tx->read - 1;
445 return tx->read - tx->write - 1;
448 static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
449 struct device *dmadev, dma_addr_t *dma)
454 len = skb_frag_size(frag);
455 if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
456 *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE);
457 if (dma_mapping_error(dmadev, *dma))
459 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE;
462 void *fragdata = skb_frag_address_safe(frag);
464 if (likely(fragdata)) {
465 memcpy(&entry->desc->tx, fragdata, len);
467 struct page *page = skb_frag_page(frag);
469 fragdata = kmap_local_page(page);
470 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag),
472 kunmap_local(fragdata);
474 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE;
481 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
483 struct device *dmadev = tx->adapter->dmadev;
484 struct tsnep_tx_entry *entry;
490 for (i = 0; i < count; i++) {
491 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
494 len = skb_headlen(skb);
495 if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
496 dma = dma_map_single(dmadev, skb->data, len,
498 if (dma_mapping_error(dmadev, dma))
500 entry->type = TSNEP_TX_TYPE_SKB_MAP;
503 memcpy(&entry->desc->tx, skb->data, len);
504 entry->type = TSNEP_TX_TYPE_SKB_INLINE;
508 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
510 len = skb_frag_size(frag);
511 mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma);
517 if (likely(mapped)) {
518 dma_unmap_addr_set(entry, dma, dma);
519 entry->desc->tx = __cpu_to_le64(dma);
528 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
530 struct device *dmadev = tx->adapter->dmadev;
531 struct tsnep_tx_entry *entry;
535 for (i = 0; i < count; i++) {
536 entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
539 if (entry->type & TSNEP_TX_TYPE_MAP)
540 dma_unmap_single(dmadev,
541 dma_unmap_addr(entry, dma),
542 dma_unmap_len(entry, len),
544 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE)
545 dma_unmap_page(dmadev,
546 dma_unmap_addr(entry, dma),
547 dma_unmap_len(entry, len),
549 map_len += entry->len;
557 static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
561 struct tsnep_tx_entry *entry;
566 if (skb_shinfo(skb)->nr_frags > 0)
567 count += skb_shinfo(skb)->nr_frags;
569 if (tsnep_tx_desc_available(tx) < count) {
570 /* ring full, shall not happen because queue is stopped if full
573 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
575 return NETDEV_TX_BUSY;
578 entry = &tx->entry[tx->write];
581 retval = tsnep_tx_map(skb, tx, count);
583 tsnep_tx_unmap(tx, tx->write, count);
584 dev_kfree_skb_any(entry->skb);
593 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
594 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
596 for (i = 0; i < count; i++)
597 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
599 tx->write = (tx->write + count) & TSNEP_RING_MASK;
601 skb_tx_timestamp(skb);
603 /* descriptor properties shall be valid before hardware is notified */
606 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
608 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
609 /* ring can get full with next frame */
610 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
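/* XDP frames transmitted via ndo_xdp_xmit() come from foreign memory and must
* be mapped with dma_map_single(), whereas XDP_TX frames live in page pool
* pages that are already DMA mapped and only need dma_sync_single_for_device().
*/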
616 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
617 struct skb_shared_info *shinfo, int count, u32 type)
619 struct device *dmadev = tx->adapter->dmadev;
620 struct tsnep_tx_entry *entry;
631 for (i = 0; i < count; i++) {
632 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
633 if (type & TSNEP_TX_TYPE_XDP_NDO) {
634 data = unlikely(frag) ? skb_frag_address(frag) :
636 dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
637 if (dma_mapping_error(dmadev, dma))
640 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE;
642 page = unlikely(frag) ? skb_frag_page(frag) :
643 virt_to_page(xdpf->data);
644 dma = page_pool_get_dma_addr(page);
646 dma += skb_frag_off(frag);
648 dma += sizeof(*xdpf) + xdpf->headroom;
649 dma_sync_single_for_device(dmadev, dma, len,
652 entry->type = TSNEP_TX_TYPE_XDP_TX;
656 dma_unmap_addr_set(entry, dma, dma);
658 entry->desc->tx = __cpu_to_le64(dma);
663 frag = &shinfo->frags[i];
664 len = skb_frag_size(frag);
671 /* This function requires __netif_tx_lock is held by the caller. */
672 static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
673 struct tsnep_tx *tx, u32 type)
675 struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
676 struct tsnep_tx_entry *entry;
677 int count, length, retval, i;
680 if (unlikely(xdp_frame_has_frags(xdpf)))
681 count += shinfo->nr_frags;
683 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
684 * will be available for normal TX path and queue is stopped there if
687 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
690 entry = &tx->entry[tx->write];
693 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
695 tsnep_tx_unmap(tx, tx->write, count);
704 for (i = 0; i < count; i++)
705 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
707 tx->write = (tx->write + count) & TSNEP_RING_MASK;
709 /* descriptor properties shall be valid before hardware is notified */
715 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
717 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
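/* For the zero-copy RX path the buffer does not come from the page pool, so
* the frame produced by xdp_convert_buff_to_frame() is transmitted with the
* NDO mapping type and gets DMA mapped like an ndo_xdp_xmit() frame.
*/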
720 static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
721 struct xdp_buff *xdp,
722 struct netdev_queue *tx_nq, struct tsnep_tx *tx,
725 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
732 /* no page pool for zero copy */
734 type = TSNEP_TX_TYPE_XDP_NDO;
736 type = TSNEP_TX_TYPE_XDP_TX;
738 __netif_tx_lock(tx_nq, smp_processor_id());
740 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
742 /* Avoid transmit queue timeout since we share it with the slow path */
744 txq_trans_cond_update(tx_nq);
746 __netif_tx_unlock(tx_nq);
751 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
753 struct tsnep_tx_entry *entry;
756 entry = &tx->entry[tx->write];
759 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
760 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
762 entry->type = TSNEP_TX_TYPE_XSK;
763 entry->len = xdpd->len;
765 entry->desc->tx = __cpu_to_le64(dma);
770 static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
775 length = tsnep_xdp_tx_map_zc(xdpd, tx);
777 tsnep_tx_activate(tx, tx->write, length, true);
778 tx->write = (tx->write + 1) & TSNEP_RING_MASK;
781 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
783 int desc_available = tsnep_tx_desc_available(tx);
784 struct xdp_desc *descs = tx->xsk_pool->tx_descs;
787 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
788 * will be available for normal TX path and queue is stopped there if
791 if (desc_available <= (MAX_SKB_FRAGS + 1))
793 desc_available -= MAX_SKB_FRAGS + 1;
795 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
796 for (i = 0; i < batch; i++)
797 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
800 /* descriptor properties shall be valid before hardware is
805 tsnep_xdp_xmit_flush(tx);
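/* TX completion: processes finished descriptors under the netdev queue lock,
* reports hardware TX timestamps, frees skbs/XDP frames, completes XSK
* descriptors and wakes the queue once enough descriptors are free again.
*/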
809 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
811 struct tsnep_tx_entry *entry;
812 struct netdev_queue *nq;
818 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
819 __netif_tx_lock(nq, smp_processor_id());
822 if (tx->read == tx->write)
825 entry = &tx->entry[tx->read];
826 if ((__le32_to_cpu(entry->desc_wb->properties) &
827 TSNEP_TX_DESC_OWNER_MASK) !=
828 (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
831 /* descriptor properties shall be read first, because valid data
837 if ((entry->type & TSNEP_TX_TYPE_SKB) &&
838 skb_shinfo(entry->skb)->nr_frags > 0)
839 count += skb_shinfo(entry->skb)->nr_frags;
840 else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
841 xdp_frame_has_frags(entry->xdpf))
842 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
844 length = tsnep_tx_unmap(tx, tx->read, count);
846 if ((entry->type & TSNEP_TX_TYPE_SKB) &&
847 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
848 (__le32_to_cpu(entry->desc_wb->properties) &
849 TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
850 struct skb_shared_hwtstamps hwtstamps;
853 if (skb_shinfo(entry->skb)->tx_flags &
854 SKBTX_HW_TSTAMP_USE_CYCLES)
856 __le64_to_cpu(entry->desc_wb->counter);
859 __le64_to_cpu(entry->desc_wb->timestamp);
861 memset(&hwtstamps, 0, sizeof(hwtstamps));
862 hwtstamps.hwtstamp = ns_to_ktime(timestamp);
864 skb_tstamp_tx(entry->skb, &hwtstamps);
867 if (entry->type & TSNEP_TX_TYPE_SKB)
868 napi_consume_skb(entry->skb, napi_budget);
869 else if (entry->type & TSNEP_TX_TYPE_XDP)
870 xdp_return_frame_rx_napi(entry->xdpf);
873 /* xdpf and zc are union with skb */
876 tx->read = (tx->read + count) & TSNEP_RING_MASK;
879 tx->bytes += length + ETH_FCS_LEN;
882 } while (likely(budget));
886 xsk_tx_completed(tx->xsk_pool, xsk_frames);
887 if (xsk_uses_need_wakeup(tx->xsk_pool))
888 xsk_set_tx_need_wakeup(tx->xsk_pool);
889 tsnep_xdp_xmit_zc(tx);
892 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
893 netif_tx_queue_stopped(nq)) {
894 netif_tx_wake_queue(nq);
897 __netif_tx_unlock(nq);
902 static bool tsnep_tx_pending(struct tsnep_tx *tx)
904 struct tsnep_tx_entry *entry;
905 struct netdev_queue *nq;
906 bool pending = false;
908 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
909 __netif_tx_lock(nq, smp_processor_id());
911 if (tx->read != tx->write) {
912 entry = &tx->entry[tx->read];
913 if ((__le32_to_cpu(entry->desc_wb->properties) &
914 TSNEP_TX_DESC_OWNER_MASK) ==
915 (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
919 __netif_tx_unlock(nq);
924 static int tsnep_tx_open(struct tsnep_tx *tx)
928 retval = tsnep_tx_ring_create(tx);
937 static void tsnep_tx_close(struct tsnep_tx *tx)
939 tsnep_tx_ring_cleanup(tx);
942 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
944 struct device *dmadev = rx->adapter->dmadev;
945 struct tsnep_rx_entry *entry;
948 for (i = 0; i < TSNEP_RING_SIZE; i++) {
949 entry = &rx->entry[i];
950 if (!rx->xsk_pool && entry->page)
951 page_pool_put_full_page(rx->page_pool, entry->page,
953 if (rx->xsk_pool && entry->xdp)
954 xsk_buff_free(entry->xdp);
955 /* xdp is union with page */
960 page_pool_destroy(rx->page_pool);
962 memset(rx->entry, 0, sizeof(rx->entry));
964 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
966 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
974 static int tsnep_rx_ring_create(struct tsnep_rx *rx)
976 struct device *dmadev = rx->adapter->dmadev;
977 struct tsnep_rx_entry *entry;
978 struct page_pool_params pp_params = { 0 };
979 struct tsnep_rx_entry *next_entry;
983 for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
985 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
991 for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
992 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
993 entry->desc_wb = (struct tsnep_rx_desc_wb *)
994 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
995 entry->desc = (struct tsnep_rx_desc *)
996 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
997 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
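/* RX pages are mapped DMA_BIDIRECTIONAL because XDP_TX transmits directly out
* of them; the page pool keeps the mapping and only cache syncs are needed.
*/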
1001 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1002 pp_params.order = 0;
1003 pp_params.pool_size = TSNEP_RING_SIZE;
1004 pp_params.nid = dev_to_node(dmadev);
1005 pp_params.dev = dmadev;
1006 pp_params.dma_dir = DMA_BIDIRECTIONAL;
1007 pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
1008 pp_params.offset = TSNEP_RX_OFFSET;
1009 rx->page_pool = page_pool_create(&pp_params);
1010 if (IS_ERR(rx->page_pool)) {
1011 retval = PTR_ERR(rx->page_pool);
1012 rx->page_pool = NULL;
1016 for (i = 0; i < TSNEP_RING_SIZE; i++) {
1017 entry = &rx->entry[i];
1018 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
1019 entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
1025 tsnep_rx_ring_cleanup(rx);
1029 static void tsnep_rx_init(struct tsnep_rx *rx)
1033 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
1034 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
1035 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
1038 rx->owner_counter = 1;
1039 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1042 static void tsnep_rx_enable(struct tsnep_rx *rx)
1044 /* descriptor properties shall be valid before hardware is notified */
1047 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
1050 static void tsnep_rx_disable(struct tsnep_rx *rx)
1054 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
1055 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
1056 ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
1060 static int tsnep_rx_desc_available(struct tsnep_rx *rx)
1062 if (rx->read <= rx->write)
1063 return TSNEP_RING_SIZE - rx->write + rx->read - 1;
1065 return rx->read - rx->write - 1;
1068 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
1072 /* last entry of page_buffer is always zero, because ring cannot be
1075 page = rx->page_buffer;
1077 page_pool_put_full_page(rx->page_pool, *page, false);
1083 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
1087 /* alloc for all ring entries except the last one, because ring cannot
1088 * be filled completely
1090 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
1091 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
1092 if (!rx->page_buffer[i]) {
1093 tsnep_rx_free_page_buffer(rx);
1102 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1106 entry->len = TSNEP_MAX_RX_BUF_SIZE;
1107 entry->dma = page_pool_get_dma_addr(entry->page);
1108 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
1111 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
1113 struct tsnep_rx_entry *entry = &rx->entry[index];
1116 page = page_pool_dev_alloc_pages(rx->page_pool);
1117 if (unlikely(!page))
1119 tsnep_rx_set_page(rx, entry, page);
1124 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
1126 struct tsnep_rx_entry *entry = &rx->entry[index];
1127 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1129 tsnep_rx_set_page(rx, entry, read->page);
1133 static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
1135 struct tsnep_rx_entry *entry = &rx->entry[index];
1137 /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiple of 4 */
1138 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
1139 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
1140 if (index == rx->increment_owner_counter) {
1141 rx->owner_counter++;
1142 if (rx->owner_counter == 4)
1143 rx->owner_counter = 1;
1144 rx->increment_owner_counter--;
1145 if (rx->increment_owner_counter < 0)
1146 rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
1148 entry->properties |=
1149 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
1150 TSNEP_DESC_OWNER_COUNTER_MASK;
1152 /* descriptor properties shall be written last, because valid data is
1157 entry->desc->properties = __cpu_to_le32(entry->properties);
1160 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
1162 bool alloc_failed = false;
1165 for (i = 0; i < count && !alloc_failed; i++) {
1166 index = (rx->write + i) & TSNEP_RING_MASK;
1168 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
1170 alloc_failed = true;
1172 /* reuse only if no other allocation was successful */
1173 if (i == 0 && reuse)
1174 tsnep_rx_reuse_buffer(rx, index);
1179 tsnep_rx_activate(rx, index);
1183 rx->write = (rx->write + i) & TSNEP_RING_MASK;
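/* A refill ends with tsnep_rx_enable(), which notifies the hardware via
* TSNEP_CONTROL that new descriptors are available.
*/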
1188 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
1192 desc_refilled = tsnep_rx_alloc(rx, count, reuse);
1194 tsnep_rx_enable(rx);
1196 return desc_refilled;
1199 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
1200 struct xdp_buff *xdp)
1203 entry->len = TSNEP_XSK_RX_BUF_SIZE;
1204 entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
1205 entry->desc->rx = __cpu_to_le64(entry->dma);
1208 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
1210 struct tsnep_rx_entry *entry = &rx->entry[index];
1211 struct tsnep_rx_entry *read = &rx->entry[rx->read];
1213 tsnep_rx_set_xdp(rx, entry, read->xdp);
1217 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
1222 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
1223 for (i = 0; i < allocated; i++) {
1224 int index = (rx->write + i) & TSNEP_RING_MASK;
1225 struct tsnep_rx_entry *entry = &rx->entry[index];
1227 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
1228 tsnep_rx_activate(rx, index);
1234 tsnep_rx_reuse_buffer_zc(rx, rx->write);
1235 tsnep_rx_activate(rx, rx->write);
1240 rx->write = (rx->write + i) & TSNEP_RING_MASK;
1245 static void tsnep_rx_free_zc(struct tsnep_rx *rx)
1249 for (i = 0; i < TSNEP_RING_SIZE; i++) {
1250 struct tsnep_rx_entry *entry = &rx->entry[i];
1253 xsk_buff_free(entry->xdp);
1258 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
1262 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
1264 tsnep_rx_enable(rx);
1266 return desc_refilled;
1269 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
1270 struct xdp_buff *xdp, int *status,
1271 struct netdev_queue *tx_nq, struct tsnep_tx *tx)
1273 unsigned int length;
1277 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
1279 act = bpf_prog_run_xdp(prog, xdp);
1284 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
1286 *status |= TSNEP_XDP_TX;
1289 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1291 *status |= TSNEP_XDP_REDIRECT;
1294 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1298 trace_xdp_exception(rx->adapter->netdev, prog, act);
1301 /* Due to xdp_adjust_tail, DMA sync for_device must cover max len
1302 * touched by CPU
1303 */
1304 sync = xdp->data_end - xdp->data_hard_start -
1305 XDP_PACKET_HEADROOM;
1306 sync = max(sync, length);
1307 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
1313 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
1314 struct xdp_buff *xdp, int *status,
1315 struct netdev_queue *tx_nq,
1316 struct tsnep_tx *tx)
1320 act = bpf_prog_run_xdp(prog, xdp);
1322 /* XDP_REDIRECT is the main action for zero-copy */
1323 if (likely(act == XDP_REDIRECT)) {
1324 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
1326 *status |= TSNEP_XDP_REDIRECT;
1334 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
1336 *status |= TSNEP_XDP_TX;
1339 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
1343 trace_xdp_exception(rx->adapter->netdev, prog, act);
1351 static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
1352 struct netdev_queue *tx_nq, struct tsnep_tx *tx)
1354 if (status & TSNEP_XDP_TX) {
1355 __netif_tx_lock(tx_nq, smp_processor_id());
1356 tsnep_xdp_xmit_flush(tx);
1357 __netif_tx_unlock(tx_nq);
1360 if (status & TSNEP_XDP_REDIRECT)
1364 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
1367 struct sk_buff *skb;
1369 skb = napi_build_skb(page_address(page), PAGE_SIZE);
1373 /* update pointers within the skb to store the data */
1374 skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
1375 __skb_put(skb, length - ETH_FCS_LEN);
1377 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
1378 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
1379 struct tsnep_rx_inline *rx_inline =
1380 (struct tsnep_rx_inline *)(page_address(page) +
1383 skb_shinfo(skb)->tx_flags |=
1384 SKBTX_HW_TSTAMP_NETDEV;
1385 memset(hwtstamps, 0, sizeof(*hwtstamps));
1386 hwtstamps->netdev_data = rx_inline;
1389 skb_record_rx_queue(skb, rx->queue_index);
1390 skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
1395 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
1396 struct page *page, int length)
1398 struct sk_buff *skb;
1400 skb = tsnep_build_skb(rx, page, length);
1402 skb_mark_for_recycle(skb);
1405 rx->bytes += length;
1406 if (skb->pkt_type == PACKET_MULTICAST)
1409 napi_gro_receive(napi, skb);
1411 page_pool_recycle_direct(rx->page_pool, page);
1417 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
1420 struct device *dmadev = rx->adapter->dmadev;
1421 enum dma_data_direction dma_dir;
1422 struct tsnep_rx_entry *entry;
1423 struct netdev_queue *tx_nq;
1424 struct bpf_prog *prog;
1425 struct xdp_buff xdp;
1426 struct tsnep_tx *tx;
1432 desc_available = tsnep_rx_desc_available(rx);
1433 dma_dir = page_pool_get_dma_dir(rx->page_pool);
1434 prog = READ_ONCE(rx->adapter->xdp_prog);
1436 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1437 rx->tx_queue_index);
1438 tx = &rx->adapter->tx[rx->tx_queue_index];
1440 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
1443 while (likely(done < budget) && (rx->read != rx->write)) {
1444 entry = &rx->entry[rx->read];
1445 if ((__le32_to_cpu(entry->desc_wb->properties) &
1446 TSNEP_DESC_OWNER_COUNTER_MASK) !=
1447 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1451 if (desc_available >= TSNEP_RING_RX_REFILL) {
1452 bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
1454 desc_available -= tsnep_rx_refill(rx, desc_available,
1457 /* buffer has been reused for refill to prevent
1458 * empty RX ring, thus buffer cannot be used for
1461 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1470 /* descriptor properties shall be read first, because valid data
1475 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
1476 length = __le32_to_cpu(entry->desc_wb->properties) &
1477 TSNEP_DESC_LENGTH_MASK;
1478 dma_sync_single_range_for_cpu(dmadev, entry->dma,
1479 TSNEP_RX_OFFSET, length, dma_dir);
1481 /* RX metadata with timestamps is in front of actual data,
1482 * subtract metadata size to get length of actual data and
1483 * consider metadata size as offset of actual data during RX
1486 length -= TSNEP_RX_INLINE_METADATA_SIZE;
1488 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1494 xdp_prepare_buff(&xdp, page_address(entry->page),
1495 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
1496 length - ETH_FCS_LEN, false);
1498 consume = tsnep_xdp_run_prog(rx, prog, &xdp,
1499 &xdp_status, tx_nq, tx);
1502 rx->bytes += length;
1510 tsnep_rx_page(rx, napi, entry->page, length);
1515 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1518 tsnep_rx_refill(rx, desc_available, false);
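/* Zero-copy RX: frames that are not consumed by the XDP program (XDP_PASS)
* are copied from the XSK buffer into a page pool page, because XSK buffers
* cannot be handed to the network stack directly.
*/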
1523 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
1526 struct tsnep_rx_entry *entry;
1527 struct netdev_queue *tx_nq;
1528 struct bpf_prog *prog;
1529 struct tsnep_tx *tx;
1536 desc_available = tsnep_rx_desc_available(rx);
1537 prog = READ_ONCE(rx->adapter->xdp_prog);
1539 tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
1540 rx->tx_queue_index);
1541 tx = &rx->adapter->tx[rx->tx_queue_index];
1544 while (likely(done < budget) && (rx->read != rx->write)) {
1545 entry = &rx->entry[rx->read];
1546 if ((__le32_to_cpu(entry->desc_wb->properties) &
1547 TSNEP_DESC_OWNER_COUNTER_MASK) !=
1548 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1552 if (desc_available >= TSNEP_RING_RX_REFILL) {
1553 bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
1555 desc_available -= tsnep_rx_refill_zc(rx, desc_available,
1558 /* buffer has been reused for refill to prevent
1559 * empty RX ring, thus buffer cannot be used for
1562 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1571 /* descriptor properties shall be read first, because valid data
1576 prefetch(entry->xdp->data);
1577 length = __le32_to_cpu(entry->desc_wb->properties) &
1578 TSNEP_DESC_LENGTH_MASK;
1579 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
1580 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
1582 /* RX metadata with timestamps is in front of actual data,
1583 * subtract metadata size to get length of actual data and
1584 * consider metadata size as offset of actual data during RX
1587 length -= TSNEP_RX_INLINE_METADATA_SIZE;
1589 rx->read = (rx->read + 1) & TSNEP_RING_MASK;
1595 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
1596 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
1598 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
1599 &xdp_status, tx_nq, tx);
1602 rx->bytes += length;
1610 page = page_pool_dev_alloc_pages(rx->page_pool);
1612 memcpy(page_address(page) + TSNEP_RX_OFFSET,
1613 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
1614 length + TSNEP_RX_INLINE_METADATA_SIZE);
1615 tsnep_rx_page(rx, napi, page, length);
1619 xsk_buff_free(entry->xdp);
1624 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
1627 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
1629 if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1631 xsk_set_rx_need_wakeup(rx->xsk_pool);
1633 xsk_clear_rx_need_wakeup(rx->xsk_pool);
1638 return desc_available ? budget : done;
1641 static bool tsnep_rx_pending(struct tsnep_rx *rx)
1643 struct tsnep_rx_entry *entry;
1645 if (rx->read != rx->write) {
1646 entry = &rx->entry[rx->read];
1647 if ((__le32_to_cpu(entry->desc_wb->properties) &
1648 TSNEP_DESC_OWNER_COUNTER_MASK) ==
1649 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
1656 static int tsnep_rx_open(struct tsnep_rx *rx)
1661 retval = tsnep_rx_ring_create(rx);
1667 desc_available = tsnep_rx_desc_available(rx);
1669 retval = tsnep_rx_alloc_zc(rx, desc_available, false);
1671 retval = tsnep_rx_alloc(rx, desc_available, false);
1672 if (retval != desc_available) {
1678 /* prealloc pages to prevent allocation failures when XSK pool is
1679 * disabled at runtime
1682 retval = tsnep_rx_alloc_page_buffer(rx);
1690 tsnep_rx_ring_cleanup(rx);
1694 static void tsnep_rx_close(struct tsnep_rx *rx)
1697 tsnep_rx_free_page_buffer(rx);
1699 tsnep_rx_ring_cleanup(rx);
1702 static void tsnep_rx_reopen(struct tsnep_rx *rx)
1704 struct page **page = rx->page_buffer;
1709 for (i = 0; i < TSNEP_RING_SIZE; i++) {
1710 struct tsnep_rx_entry *entry = &rx->entry[i];
1712 /* defined initial values for properties are required for
1713 * correct owner counter checking
1715 entry->desc->properties = 0;
1716 entry->desc_wb->properties = 0;
1718 /* prevent allocation failures by reusing kept pages */
1720 tsnep_rx_set_page(rx, entry, *page);
1721 tsnep_rx_activate(rx, rx->write);
1730 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
1732 struct page **page = rx->page_buffer;
1738 /* alloc all ring entries except the last one, because ring cannot be
1739 * filled completely; allocating as many buffers as possible is enough,
1740 * because a wakeup is done if new buffers are available
1741 */
1742 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
1743 TSNEP_RING_SIZE - 1);
1745 for (i = 0; i < TSNEP_RING_SIZE; i++) {
1746 struct tsnep_rx_entry *entry = &rx->entry[i];
1748 /* keep pages to prevent allocation failures when xsk is
1752 *page = entry->page;
1758 /* defined initial values for properties are required for
1759 * correct owner counter checking
1761 entry->desc->properties = 0;
1762 entry->desc_wb->properties = 0;
1765 tsnep_rx_set_xdp(rx, entry,
1766 rx->xdp_batch[allocated - 1]);
1767 tsnep_rx_activate(rx, rx->write);
1774 /* set need wakeup flag immediately if ring is not filled completely,
1775 * first polling would be too late as need wakeup signalling would
1776 * be delayed for an indefinite time
1778 if (xsk_uses_need_wakeup(rx->xsk_pool)) {
1779 int desc_available = tsnep_rx_desc_available(rx);
1782 xsk_set_rx_need_wakeup(rx->xsk_pool);
1784 xsk_clear_rx_need_wakeup(rx->xsk_pool);
1788 static bool tsnep_pending(struct tsnep_queue *queue)
1790 if (queue->tx && tsnep_tx_pending(queue->tx))
1793 if (queue->rx && tsnep_rx_pending(queue->rx))
1799 static int tsnep_poll(struct napi_struct *napi, int budget)
1801 struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
1803 bool complete = true;
1807 complete = tsnep_tx_poll(queue->tx, budget);
1809 /* handle case where we are called by netpoll with a budget of 0 */
1810 if (unlikely(budget <= 0))
1814 done = queue->rx->xsk_pool ?
1815 tsnep_rx_poll_zc(queue->rx, napi, budget) :
1816 tsnep_rx_poll(queue->rx, napi, budget);
1821 /* if all work not completed, return budget and keep polling */
1825 if (likely(napi_complete_done(napi, done))) {
1826 tsnep_enable_irq(queue->adapter, queue->irq_mask);
1828 /* reschedule if work is already pending, prevent rotten packets
1829 * which are transmitted or received after polling but before
1832 if (tsnep_pending(queue)) {
1833 tsnep_disable_irq(queue->adapter, queue->irq_mask);
1834 napi_schedule(napi);
1838 return min(done, budget - 1);
1841 static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
1843 const char *name = netdev_name(queue->adapter->netdev);
1844 irq_handler_t handler;
1849 sprintf(queue->name, "%s-mac", name);
1850 handler = tsnep_irq;
1851 dev = queue->adapter;
1853 if (queue->tx && queue->rx)
1854 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
1855 name, queue->rx->queue_index);
1857 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
1858 name, queue->tx->queue_index);
1860 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
1861 name, queue->rx->queue_index);
1862 handler = tsnep_irq_txrx;
1866 retval = request_irq(queue->irq, handler, 0, queue->name, dev);
1868 /* if name is empty, then interrupt won't be freed */
1869 memset(queue->name, 0, sizeof(queue->name));
1875 static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
1879 if (!strlen(queue->name))
1883 dev = queue->adapter;
1887 free_irq(queue->irq, dev);
1888 memset(queue->name, 0, sizeof(queue->name));
1891 static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
1893 struct tsnep_rx *rx = queue->rx;
1895 tsnep_free_irq(queue, first);
1898 if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
1899 xdp_rxq_info_unreg(&rx->xdp_rxq);
1900 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
1901 xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
1904 netif_napi_del(&queue->napi);
1907 static int tsnep_queue_open(struct tsnep_adapter *adapter,
1908 struct tsnep_queue *queue, bool first)
1910 struct tsnep_rx *rx = queue->rx;
1911 struct tsnep_tx *tx = queue->tx;
1914 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
1917 /* choose TX queue for XDP_TX */
1919 rx->tx_queue_index = tx->queue_index;
1920 else if (rx->queue_index < adapter->num_tx_queues)
1921 rx->tx_queue_index = rx->queue_index;
1923 rx->tx_queue_index = 0;
1925 /* prepare both memory models to eliminate possible registration
1926 * errors when memory model is switched between page pool and
1927 * XSK pool during runtime
1929 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
1930 rx->queue_index, queue->napi.napi_id);
1933 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
1938 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
1939 rx->queue_index, queue->napi.napi_id);
1942 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
1943 MEM_TYPE_XSK_BUFF_POOL,
1948 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
1951 retval = tsnep_request_irq(queue, first);
1953 netif_err(adapter, drv, adapter->netdev,
1954 "can't get assigned irq %d.\n", queue->irq);
1961 tsnep_queue_close(queue, first);
1966 static void tsnep_queue_enable(struct tsnep_queue *queue)
1968 napi_enable(&queue->napi);
1969 tsnep_enable_irq(queue->adapter, queue->irq_mask);
1972 tsnep_tx_enable(queue->tx);
1975 tsnep_rx_enable(queue->rx);
1978 static void tsnep_queue_disable(struct tsnep_queue *queue)
1981 tsnep_tx_disable(queue->tx, &queue->napi);
1983 napi_disable(&queue->napi);
1984 tsnep_disable_irq(queue->adapter, queue->irq_mask);
1986 /* disable RX after NAPI polling has been disabled, because RX can be
1987 * enabled during NAPI polling
1990 tsnep_rx_disable(queue->rx);
1993 static int tsnep_netdev_open(struct net_device *netdev)
1995 struct tsnep_adapter *adapter = netdev_priv(netdev);
1998 for (i = 0; i < adapter->num_queues; i++) {
1999 if (adapter->queue[i].tx) {
2000 retval = tsnep_tx_open(adapter->queue[i].tx);
2004 if (adapter->queue[i].rx) {
2005 retval = tsnep_rx_open(adapter->queue[i].rx);
2010 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
2015 retval = netif_set_real_num_tx_queues(adapter->netdev,
2016 adapter->num_tx_queues);
2019 retval = netif_set_real_num_rx_queues(adapter->netdev,
2020 adapter->num_rx_queues);
2024 tsnep_enable_irq(adapter, ECM_INT_LINK);
2025 retval = tsnep_phy_open(adapter);
2029 for (i = 0; i < adapter->num_queues; i++)
2030 tsnep_queue_enable(&adapter->queue[i]);
2035 tsnep_disable_irq(adapter, ECM_INT_LINK);
2037 for (i = 0; i < adapter->num_queues; i++) {
2038 tsnep_queue_close(&adapter->queue[i], i == 0);
2040 if (adapter->queue[i].rx)
2041 tsnep_rx_close(adapter->queue[i].rx);
2042 if (adapter->queue[i].tx)
2043 tsnep_tx_close(adapter->queue[i].tx);
2048 static int tsnep_netdev_close(struct net_device *netdev)
2050 struct tsnep_adapter *adapter = netdev_priv(netdev);
2053 tsnep_disable_irq(adapter, ECM_INT_LINK);
2054 tsnep_phy_close(adapter);
2056 for (i = 0; i < adapter->num_queues; i++) {
2057 tsnep_queue_disable(&adapter->queue[i]);
2059 tsnep_queue_close(&adapter->queue[i], i == 0);
2061 if (adapter->queue[i].rx)
2062 tsnep_rx_close(adapter->queue[i].rx);
2063 if (adapter->queue[i].tx)
2064 tsnep_tx_close(adapter->queue[i].tx);
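/* Enabling an XSK pool at runtime: page buffers and the XDP batch array are
* preallocated so that switching back to page pool mode later cannot fail on
* allocation; the queue is disabled, rebound to the pool and re-enabled while
* the netdev stays up.
*/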
2070 int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
2072 bool running = netif_running(queue->adapter->netdev);
2075 frame_size = xsk_pool_get_rx_frame_size(pool);
2076 if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
2079 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
2080 sizeof(*queue->rx->page_buffer),
2082 if (!queue->rx->page_buffer)
2084 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
2085 sizeof(*queue->rx->xdp_batch),
2087 if (!queue->rx->xdp_batch) {
2088 kfree(queue->rx->page_buffer);
2089 queue->rx->page_buffer = NULL;
2094 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
2097 tsnep_queue_disable(queue);
2099 queue->tx->xsk_pool = pool;
2100 queue->rx->xsk_pool = pool;
2103 tsnep_rx_reopen_xsk(queue->rx);
2104 tsnep_queue_enable(queue);
2110 void tsnep_disable_xsk(struct tsnep_queue *queue)
2112 bool running = netif_running(queue->adapter->netdev);
2115 tsnep_queue_disable(queue);
2117 tsnep_rx_free_zc(queue->rx);
2119 queue->rx->xsk_pool = NULL;
2120 queue->tx->xsk_pool = NULL;
2123 tsnep_rx_reopen(queue->rx);
2124 tsnep_queue_enable(queue);
2127 kfree(queue->rx->xdp_batch);
2128 queue->rx->xdp_batch = NULL;
2129 kfree(queue->rx->page_buffer);
2130 queue->rx->page_buffer = NULL;
2133 static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
2134 struct net_device *netdev)
2136 struct tsnep_adapter *adapter = netdev_priv(netdev);
2137 u16 queue_mapping = skb_get_queue_mapping(skb);
2139 if (queue_mapping >= adapter->num_tx_queues)
2142 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
2145 static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
2148 if (!netif_running(netdev))
2150 if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
2151 return tsnep_ptp_ioctl(netdev, ifr, cmd);
2152 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
2155 static void tsnep_netdev_set_multicast(struct net_device *netdev)
2157 struct tsnep_adapter *adapter = netdev_priv(netdev);
2161 /* configured MAC address and broadcasts are never filtered */
2162 if (netdev->flags & IFF_PROMISC) {
2163 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
2164 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
2165 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
2166 rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
2168 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
2171 static void tsnep_netdev_get_stats64(struct net_device *netdev,
2172 struct rtnl_link_stats64 *stats)
2174 struct tsnep_adapter *adapter = netdev_priv(netdev);
2179 for (i = 0; i < adapter->num_tx_queues; i++) {
2180 stats->tx_packets += adapter->tx[i].packets;
2181 stats->tx_bytes += adapter->tx[i].bytes;
2182 stats->tx_dropped += adapter->tx[i].dropped;
2184 for (i = 0; i < adapter->num_rx_queues; i++) {
2185 stats->rx_packets += adapter->rx[i].packets;
2186 stats->rx_bytes += adapter->rx[i].bytes;
2187 stats->rx_dropped += adapter->rx[i].dropped;
2188 stats->multicast += adapter->rx[i].multicast;
2190 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
2191 TSNEP_RX_STATISTIC);
2192 val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
2193 TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
2194 stats->rx_dropped += val;
2195 val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
2196 TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
2197 stats->rx_dropped += val;
2198 val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
2199 TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
2200 stats->rx_errors += val;
2201 stats->rx_fifo_errors += val;
2202 val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
2203 TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
2204 stats->rx_errors += val;
2205 stats->rx_frame_errors += val;
2208 reg = ioread32(adapter->addr + ECM_STAT);
2209 val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
2210 stats->rx_errors += val;
2211 val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
2212 stats->rx_errors += val;
2213 stats->rx_crc_errors += val;
2214 val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
2215 stats->rx_errors += val;
2218 static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
2220 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
2221 iowrite16(*(u16 *)(addr + sizeof(u32)),
2222 adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
2224 ether_addr_copy(adapter->mac_address, addr);
2225 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
2229 static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
2231 struct tsnep_adapter *adapter = netdev_priv(netdev);
2232 struct sockaddr *sock_addr = addr;
2235 retval = eth_prepare_mac_addr_change(netdev, sock_addr);
2238 eth_hw_addr_set(netdev, sock_addr->sa_data);
2239 tsnep_mac_set_address(adapter, sock_addr->sa_data);
2244 static int tsnep_netdev_set_features(struct net_device *netdev,
2245 netdev_features_t features)
2247 struct tsnep_adapter *adapter = netdev_priv(netdev);
2248 netdev_features_t changed = netdev->features ^ features;
2252 if (changed & NETIF_F_LOOPBACK) {
2253 enable = !!(features & NETIF_F_LOOPBACK);
2254 retval = tsnep_phy_loopback(adapter, enable);
2260 static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
2261 const struct skb_shared_hwtstamps *hwtstamps,
2264 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
2268 timestamp = __le64_to_cpu(rx_inline->counter);
2270 timestamp = __le64_to_cpu(rx_inline->timestamp);
2272 return ns_to_ktime(timestamp);
2275 static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
2277 struct tsnep_adapter *adapter = netdev_priv(dev);
2279 switch (bpf->command) {
2280 case XDP_SETUP_PROG:
2281 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
2282 case XDP_SETUP_XSK_POOL:
2283 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
2290 static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
2292 if (cpu >= TSNEP_MAX_QUEUES)
2293 cpu &= TSNEP_MAX_QUEUES - 1;
2295 while (cpu >= adapter->num_tx_queues)
2296 cpu -= adapter->num_tx_queues;
2298 return &adapter->tx[cpu];
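/* ndo_xdp_xmit: the TX queue is derived from the current CPU (see
* tsnep_xdp_get_tx() above) so that concurrent callers spread across queues;
* the netdev queue lock is taken because the queue is shared with the normal
* transmit path.
*/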
2301 static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
2302 struct xdp_frame **xdp, u32 flags)
2304 struct tsnep_adapter *adapter = netdev_priv(dev);
2305 u32 cpu = smp_processor_id();
2306 struct netdev_queue *nq;
2307 struct tsnep_tx *tx;
2311 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2314 tx = tsnep_xdp_get_tx(adapter, cpu);
2315 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
2317 __netif_tx_lock(nq, cpu);
2319 for (nxmit = 0; nxmit < n; nxmit++) {
2320 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
2321 TSNEP_TX_TYPE_XDP_NDO);
2325 /* avoid transmit queue timeout since we share it with the slow
2328 txq_trans_cond_update(nq);
2331 if (flags & XDP_XMIT_FLUSH)
2332 tsnep_xdp_xmit_flush(tx);
2334 __netif_tx_unlock(nq);
2339 static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
2342 struct tsnep_adapter *adapter = netdev_priv(dev);
2343 struct tsnep_queue *queue;
2345 if (queue_id >= adapter->num_rx_queues ||
2346 queue_id >= adapter->num_tx_queues)
2349 queue = &adapter->queue[queue_id];
2351 if (!napi_if_scheduled_mark_missed(&queue->napi))
2352 napi_schedule(&queue->napi);
2357 static const struct net_device_ops tsnep_netdev_ops = {
2358 .ndo_open = tsnep_netdev_open,
2359 .ndo_stop = tsnep_netdev_close,
2360 .ndo_start_xmit = tsnep_netdev_xmit_frame,
2361 .ndo_eth_ioctl = tsnep_netdev_ioctl,
2362 .ndo_set_rx_mode = tsnep_netdev_set_multicast,
2363 .ndo_get_stats64 = tsnep_netdev_get_stats64,
2364 .ndo_set_mac_address = tsnep_netdev_set_mac_address,
2365 .ndo_set_features = tsnep_netdev_set_features,
2366 .ndo_get_tstamp = tsnep_netdev_get_tstamp,
2367 .ndo_setup_tc = tsnep_tc_setup,
2368 .ndo_bpf = tsnep_netdev_bpf,
2369 .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
2370 .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
2373 static int tsnep_mac_init(struct tsnep_adapter *adapter)
2377 /* initialize RX filtering, at least configured MAC address and
2378 * broadcast are not filtered
2380 iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
2382 /* try to get MAC address in the following order:
2384 * - valid MAC address already set
2385 * - MAC address register if valid
2386 * - random MAC address
2388 retval = of_get_mac_address(adapter->pdev->dev.of_node,
2389 adapter->mac_address);
2390 if (retval == -EPROBE_DEFER)
2392 if (retval && !is_valid_ether_addr(adapter->mac_address)) {
2393 *(u32 *)adapter->mac_address =
2394 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
2395 *(u16 *)(adapter->mac_address + sizeof(u32)) =
2396 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
2397 if (!is_valid_ether_addr(adapter->mac_address))
2398 eth_random_addr(adapter->mac_address);
2401 tsnep_mac_set_address(adapter, adapter->mac_address);
2402 eth_hw_addr_set(adapter->netdev, adapter->mac_address);
2407 static int tsnep_mdio_init(struct tsnep_adapter *adapter)
2409 struct device_node *np = adapter->pdev->dev.of_node;
2413 np = of_get_child_by_name(np, "mdio");
2417 adapter->suppress_preamble =
2418 of_property_read_bool(np, "suppress-preamble");
2421 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2422 if (!adapter->mdiobus) {
2428 adapter->mdiobus->priv = (void *)adapter;
2429 adapter->mdiobus->parent = &adapter->pdev->dev;
2430 adapter->mdiobus->read = tsnep_mdiobus_read;
2431 adapter->mdiobus->write = tsnep_mdiobus_write;
2432 adapter->mdiobus->name = TSNEP "-mdiobus";
2433 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
2434 adapter->pdev->name);
2436 /* do not scan broadcast address */
2437 adapter->mdiobus->phy_mask = 0x0000001;
2439 retval = of_mdiobus_register(adapter->mdiobus, np);
2447 static int tsnep_phy_init(struct tsnep_adapter *adapter)
2449 struct device_node *phy_node;
2452 retval = of_get_phy_mode(adapter->pdev->dev.of_node,
2453 &adapter->phy_mode);
2455 adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
2457 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
2459 adapter->phydev = of_phy_find_device(phy_node);
2460 of_node_put(phy_node);
2461 if (!adapter->phydev && adapter->mdiobus)
2462 adapter->phydev = phy_find_first(adapter->mdiobus);
2463 if (!adapter->phydev)
2469 static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
2471 u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
2476 /* one TX/RX queue pair for netdev is mandatory */
2477 if (platform_irq_count(adapter->pdev) == 1)
2478 retval = platform_get_irq(adapter->pdev, 0);
2480 retval = platform_get_irq_byname(adapter->pdev, "mac");
2483 adapter->num_tx_queues = 1;
2484 adapter->num_rx_queues = 1;
2485 adapter->num_queues = 1;
2486 adapter->queue[0].adapter = adapter;
2487 adapter->queue[0].irq = retval;
2488 adapter->queue[0].tx = &adapter->tx[0];
2489 adapter->queue[0].tx->adapter = adapter;
2490 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
2491 adapter->queue[0].tx->queue_index = 0;
2492 adapter->queue[0].rx = &adapter->rx[0];
2493 adapter->queue[0].rx->adapter = adapter;
2494 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
2495 adapter->queue[0].rx->queue_index = 0;
2496 adapter->queue[0].irq_mask = irq_mask;
2497 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
2498 retval = tsnep_set_irq_coalesce(&adapter->queue[0],
2499 TSNEP_COALESCE_USECS_DEFAULT);
2503 adapter->netdev->irq = adapter->queue[0].irq;
2505 /* add additional TX/RX queue pairs only if dedicated interrupt is
2508 for (i = 1; i < queue_count; i++) {
2509 sprintf(name, "txrx-%d", i);
2510 retval = platform_get_irq_byname_optional(adapter->pdev, name);
2514 adapter->num_tx_queues++;
2515 adapter->num_rx_queues++;
2516 adapter->num_queues++;
2517 adapter->queue[i].adapter = adapter;
2518 adapter->queue[i].irq = retval;
2519 adapter->queue[i].tx = &adapter->tx[i];
2520 adapter->queue[i].tx->adapter = adapter;
2521 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
2522 adapter->queue[i].tx->queue_index = i;
2523 adapter->queue[i].rx = &adapter->rx[i];
2524 adapter->queue[i].rx->adapter = adapter;
2525 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
2526 adapter->queue[i].rx->queue_index = i;
2527 adapter->queue[i].irq_mask =
2528 irq_mask << (ECM_INT_TXRX_SHIFT * i);
2529 adapter->queue[i].irq_delay_addr =
2530 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
2531 retval = tsnep_set_irq_coalesce(&adapter->queue[i],
2532 TSNEP_COALESCE_USECS_DEFAULT);
2540 static int tsnep_probe(struct platform_device *pdev)
2542 struct tsnep_adapter *adapter;
2543 struct net_device *netdev;
2544 struct resource *io;
2551 netdev = devm_alloc_etherdev_mqs(&pdev->dev,
2552 sizeof(struct tsnep_adapter),
2553 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
2556 SET_NETDEV_DEV(netdev, &pdev->dev);
2557 adapter = netdev_priv(netdev);
2558 platform_set_drvdata(pdev, adapter);
2559 adapter->pdev = pdev;
2560 adapter->dmadev = &pdev->dev;
2561 adapter->netdev = netdev;
2562 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2563 NETIF_MSG_LINK | NETIF_MSG_IFUP |
2564 NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
2566 netdev->min_mtu = ETH_MIN_MTU;
2567 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
2569 mutex_init(&adapter->gate_control_lock);
2570 mutex_init(&adapter->rxnfc_lock);
2571 INIT_LIST_HEAD(&adapter->rxnfc_rules);
2573 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2574 adapter->addr = devm_ioremap_resource(&pdev->dev, io);
2575 if (IS_ERR(adapter->addr))
2576 return PTR_ERR(adapter->addr);
2577 netdev->mem_start = io->start;
2578 netdev->mem_end = io->end;
2580 type = ioread32(adapter->addr + ECM_TYPE);
2581 revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
2582 version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
2583 queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
2584 adapter->gate_control = type & ECM_GATE_CONTROL;
2585 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
2587 tsnep_disable_irq(adapter, ECM_INT_ALL);
2589 retval = tsnep_queue_init(adapter, queue_count);
2593 retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
2596 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
2600 retval = tsnep_mac_init(adapter);
2604 retval = tsnep_mdio_init(adapter);
2606 goto mdio_init_failed;
2608 retval = tsnep_phy_init(adapter);
2610 goto phy_init_failed;
2612 retval = tsnep_ptp_init(adapter);
2614 goto ptp_init_failed;
2616 retval = tsnep_tc_init(adapter);
2618 goto tc_init_failed;
2620 retval = tsnep_rxnfc_init(adapter);
2622 goto rxnfc_init_failed;
2624 netdev->netdev_ops = &tsnep_netdev_ops;
2625 netdev->ethtool_ops = &tsnep_ethtool_ops;
2626 netdev->features = NETIF_F_SG;
2627 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
2629 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
2630 NETDEV_XDP_ACT_NDO_XMIT |
2631 NETDEV_XDP_ACT_NDO_XMIT_SG |
2632 NETDEV_XDP_ACT_XSK_ZEROCOPY;
2634 /* carrier off reporting is important to ethtool even BEFORE open */
2635 netif_carrier_off(netdev);
2637 retval = register_netdev(netdev);
2639 goto register_failed;
2641 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
2643 if (adapter->gate_control)
2644 dev_info(&adapter->pdev->dev, "gate control detected\n");
2649 tsnep_rxnfc_cleanup(adapter);
2651 tsnep_tc_cleanup(adapter);
2653 tsnep_ptp_cleanup(adapter);
2656 if (adapter->mdiobus)
2657 mdiobus_unregister(adapter->mdiobus);
2662 static void tsnep_remove(struct platform_device *pdev)
2664 struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
2666 unregister_netdev(adapter->netdev);
2668 tsnep_rxnfc_cleanup(adapter);
2670 tsnep_tc_cleanup(adapter);
2672 tsnep_ptp_cleanup(adapter);
2674 if (adapter->mdiobus)
2675 mdiobus_unregister(adapter->mdiobus);
2677 tsnep_disable_irq(adapter, ECM_INT_ALL);
2680 static const struct of_device_id tsnep_of_match[] = {
2681 { .compatible = "engleder,tsnep", },
2684 MODULE_DEVICE_TABLE(of, tsnep_of_match);
2686 static struct platform_driver tsnep_driver = {
2689 .of_match_table = tsnep_of_match,
2691 .probe = tsnep_probe,
2692 .remove_new = tsnep_remove,
2694 module_platform_driver(tsnep_driver);
2696 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
2697 MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
2698 MODULE_LICENSE("GPL");