// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>

#include "lan966x_main.h"
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
	return lan_rd(lan966x, FDMA_CH_ACTIVE);
}
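/* Allocate a page from the RX page pool and point the DB at its DMA
 * address, leaving XDP_PACKET_HEADROOM in front of the frame data.
 */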
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
					       struct lan966x_db *db)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return NULL;

	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

	return page;
}
static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
	int i, j;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
			page_pool_put_full_page(rx->page_pool,
						rx->page[i][j], false);
	}
}
static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
	struct page *page;

	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return;

	page_pool_recycle_direct(rx->page_pool, page);
}
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
				    struct lan966x_rx_dcb *dcb,
				    u64 nextptr)
{
	struct lan966x_db *db;
	int i;

	for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
		db = &dcb->db[i];
		db->status = FDMA_DCB_STATUS_INTR;
	}

	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

	rx->last_entry->nextptr = nextptr;
	rx->last_entry = dcb;
}
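/* Create the RX page pool and (re)register it as the memory model of
 * every port's XDP RX queue, so redirected buffers are returned to it.
 */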
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct page_pool_params pp_params = {
		.order = rx->page_order,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = FDMA_DCB_MAX,
		.nid = NUMA_NO_NODE,
		.dev = lan966x->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = rx->max_mtu -
			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
	};

	if (lan966x_xdp_present(lan966x))
		pp_params.dma_dir = DMA_BIDIRECTIONAL;

	rx->page_pool = page_pool_create(&pp_params);

	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;

		if (!lan966x->ports[i])
			continue;

		port = lan966x->ports[i];
		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rx->page_pool);
	}

	return PTR_ERR_OR_ZERO(rx->page_pool);
}
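/* Allocate the RX ring: the page pool, the coherent DCB list and one
 * page per DB, then chain the DCBs together for the hardware.
 */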
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_rx_dcb *dcb;
	struct lan966x_db *db;
	struct page *page;
	int i, j;
	int size;

	if (lan966x_fdma_rx_alloc_page_pool(rx))
		return PTR_ERR(rx->page_pool);

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);

	rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
	if (!rx->dcbs)
		return -ENOMEM;

	rx->last_entry = rx->dcbs;
	rx->db_index = 0;
	rx->dcb_index = 0;

	/* Now for each dcb allocate the dbs */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &rx->dcbs[i];
		dcb->info = 0;

		/* For each db allocate a page and map it to the DB dataptr. */
		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			page = lan966x_fdma_rx_alloc_page(rx, db);
			if (!page)
				return -ENOMEM;

			db->status = 0;
			rx->page[i][j] = page;
		}

		lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
	}

	return 0;
}
static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
	rx->dcb_index++;
	rx->dcb_index &= FDMA_DCB_MAX - 1;
}
static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 size;

	/* Now it is possible to do the cleanup of dcb. The size must match
	 * the lan966x_rx_dcb allocation done in lan966x_fdma_rx_alloc().
	 */
	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 mask;

	/* When activating a channel, first is required to write the first DCB
	 * address and then to activate it
	 */
	lan_wr(lower_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP(rx->channel_id));
	lan_wr(upper_32_bits((u64)rx->dma), lan966x,
	       FDMA_DCB_LLP1(rx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(rx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
		FDMA_PORT_CTRL_XTR_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(rx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}
static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(rx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);
}
static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
	struct lan966x *lan966x = rx->lan966x;

	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}
static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
				    struct lan966x_tx_dcb *dcb)
{
	dcb->nextptr = FDMA_DCB_INVALID_DATA;
	dcb->info = 0;
}
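/* Allocate the TX ring: the software side buffer descriptors and the
 * coherent hardware DCB list, with every DCB initialized as unused.
 */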
static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;
	struct lan966x_db *db;
	int size;
	int i, j;

	tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
			       GFP_KERNEL);
	if (!tx->dcbs_buf)
		return -ENOMEM;

	/* calculate how many pages are needed to allocate the dcbs */
	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
	if (!tx->dcbs)
		goto out;

	/* Now for each dcb allocate the db */
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb = &tx->dcbs[i];

		for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
			db = &dcb->db[j];
			db->dataptr = 0;
			db->status = 0;
		}

		lan966x_fdma_tx_add_dcb(tx, dcb);
	}

	return 0;

out:
	kfree(tx->dcbs_buf);

	return -ENOMEM;
}
static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	int size;

	kfree(tx->dcbs_buf);

	size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}
static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 mask;

	/* When activating a channel, first is required to write the first DCB
	 * address and then to activate it
	 */
	lan_wr(lower_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP(tx->channel_id));
	lan_wr(upper_32_bits((u64)tx->dma), lan966x,
	       FDMA_DCB_LLP1(tx->channel_id));

	lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
	       FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
	       FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
	       FDMA_CH_CFG_CH_MEM_SET(1),
	       lan966x, FDMA_CH_CFG(tx->channel_id));

	/* Start fdma */
	lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
		FDMA_PORT_CTRL_INJ_STOP,
		lan966x, FDMA_PORT_CTRL(0));

	/* Enable interrupts */
	mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
	mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
	mask |= BIT(tx->channel_id);
	lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
		FDMA_INTR_DB_ENA_INTR_DB_ENA,
		lan966x, FDMA_INTR_DB_ENA);

	/* Activate the channel */
	lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
		FDMA_CH_ACTIVATE_CH_ACTIVATE,
		lan966x, FDMA_CH_ACTIVATE);
}
static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;
	u32 val;

	/* Disable the channel */
	lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
		FDMA_CH_DISABLE_CH_DISABLE,
		lan966x, FDMA_CH_DISABLE);

	readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
				  val, !(val & BIT(tx->channel_id)),
				  READL_SLEEP_US, READL_TIMEOUT_US);

	lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
		FDMA_CH_DB_DISCARD_DB_DISCARD,
		lan966x, FDMA_CH_DB_DISCARD);

	tx->activated = false;
	tx->last_in_use = -1;
}
static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
	struct lan966x *lan966x = tx->lan966x;

	/* Write the registers to reload the channel */
	lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
		FDMA_CH_RELOAD_CH_RELOAD,
		lan966x, FDMA_CH_RELOAD);
}
static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		if (netif_queue_stopped(port->dev))
			netif_wake_queue(port->dev);
	}
}
static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		port = lan966x->ports[i];
		if (!port)
			continue;

		netif_stop_queue(port->dev);
	}
}
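/* Reclaim TX buffers whose DCBs the hardware has marked as done:
 * unmap them, update stats and release the skb or XDP frame.
 */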
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
	struct lan966x_tx *tx = &lan966x->tx;
	struct lan966x_tx_dcb_buf *dcb_buf;
	struct xdp_frame_bulk bq;
	struct lan966x_db *db;
	unsigned long flags;
	bool clear = false;
	int i;

	xdp_frame_bulk_init(&bq);

	spin_lock_irqsave(&lan966x->tx_lock, flags);
	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];

		if (!dcb_buf->used)
			continue;

		db = &tx->dcbs[i].db[0];
		if (!(db->status & FDMA_DCB_STATUS_DONE))
			continue;

		dcb_buf->dev->stats.tx_packets++;
		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

		dcb_buf->used = false;
		if (dcb_buf->use_skb) {
			dma_unmap_single(lan966x->dev,
					 dcb_buf->dma_addr,
					 dcb_buf->len,
					 DMA_TO_DEVICE);

			if (!dcb_buf->ptp)
				napi_consume_skb(dcb_buf->data.skb, weight);
		} else {
			if (dcb_buf->xdp_ndo)
				dma_unmap_single(lan966x->dev,
						 dcb_buf->dma_addr,
						 dcb_buf->len,
						 DMA_TO_DEVICE);

			if (dcb_buf->xdp_ndo)
				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
			else
				xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
		}

		clear = true;
	}

	if (clear)
		xdp_flush_frame_bulk(&bq);

	if (clear)
		lan966x_fdma_wakeup_netdev(lan966x);

	spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}
static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
	struct lan966x_db *db;

	/* Check if there is any data */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
		return false;

	return true;
}
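/* Inspect the received DB: sync it for the CPU, extract the source port
 * from the IFH and, if the port has an XDP program, run it on the frame.
 */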
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_port *port;
	struct lan966x_db *db;
	struct page *page;

	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];
	if (unlikely(!page))
		return FDMA_ERROR;

	dma_sync_single_for_cpu(lan966x->dev,
				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
				FDMA_DCB_STATUS_BLOCKL(db->status),
				DMA_FROM_DEVICE);

	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
				 src_port);
	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
		return FDMA_ERROR;

	port = lan966x->ports[*src_port];
	if (!lan966x_xdp_port_present(port))
		return FDMA_PASS;

	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}
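/* Build an skb around the received page, strip the IFH and FCS, attach
 * the RX timestamp and hand ownership of the page to the skb.
 */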
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
						 u64 src_port)
{
	struct lan966x *lan966x = rx->lan966x;
	struct lan966x_db *db;
	struct sk_buff *skb;
	struct page *page;
	u64 timestamp;

	/* Get the received frame and unmap it */
	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
	page = rx->page[rx->dcb_index][rx->db_index];

	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
	if (unlikely(!skb))
		goto free_page;

	skb_mark_for_recycle(skb);

	skb_reserve(skb, XDP_PACKET_HEADROOM);
	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

	lan966x_ifh_get_timestamp(skb->data, &timestamp);

	skb->dev = lan966x->ports[src_port]->dev;
	skb_pull(skb, IFH_LEN_BYTES);

	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (lan966x->bridge_mask & BIT(src_port)) {
		skb->offload_fwd_mark = 1;

		skb_reset_network_header(skb);
		if (!lan966x_hw_offload(lan966x, src_port, skb))
			skb->offload_fwd_mark = 0;
	}

	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;

	return skb;

free_page:
	page_pool_recycle_direct(rx->page_pool, page);

	return NULL;
}
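/* NAPI poll: reclaim completed TX buffers, then process up to @weight
 * received frames and refill the consumed RX DCBs with fresh pages.
 */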
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
	struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
	struct lan966x_rx *rx = &lan966x->rx;
	int dcb_reload = rx->dcb_index;
	struct lan966x_rx_dcb *old_dcb;
	struct lan966x_db *db;
	bool redirect = false;
	struct sk_buff *skb;
	struct page *page;
	int counter = 0;
	u64 src_port;
	u64 nextptr;

	lan966x_fdma_tx_clear_buf(lan966x, weight);

	/* Get all received skb */
	while (counter < weight) {
		if (!lan966x_fdma_rx_more_frames(rx))
			break;

		counter++;

		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
		case FDMA_PASS:
			break;
		case FDMA_ERROR:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			goto allocate_new;
		case FDMA_REDIRECT:
			redirect = true;
			fallthrough;
		case FDMA_TX:
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		case FDMA_DROP:
			lan966x_fdma_rx_free_page(rx);
			lan966x_fdma_rx_advance_dcb(rx);
			continue;
		}

		skb = lan966x_fdma_rx_get_frame(rx, src_port);
		lan966x_fdma_rx_advance_dcb(rx);
		if (!skb)
			goto allocate_new;

		napi_gro_receive(&lan966x->napi, skb);
	}

allocate_new:
	/* Allocate new pages and map them */
	while (dcb_reload != rx->dcb_index) {
		db = &rx->dcbs[dcb_reload].db[rx->db_index];
		page = lan966x_fdma_rx_alloc_page(rx, db);
		if (unlikely(!page))
			break;
		rx->page[dcb_reload][rx->db_index] = page;

		old_dcb = &rx->dcbs[dcb_reload];
		dcb_reload++;
		dcb_reload &= FDMA_DCB_MAX - 1;

		nextptr = rx->dma + ((unsigned long)old_dcb -
				     (unsigned long)rx->dcbs);
		lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
		lan966x_fdma_rx_reload(rx);
	}

	if (redirect)
		xdp_do_flush();

	if (counter < weight && napi_complete_done(napi, counter))
		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

	return counter;
}
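/* Top half: ack DB interrupts and kick NAPI; warn on and clear any errors. */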
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
	struct lan966x *lan966x = args;
	u32 db, err, err_type;

	db = lan_rd(lan966x, FDMA_INTR_DB);
	err = lan_rd(lan966x, FDMA_INTR_ERR);

	if (db) {
		lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
		lan_wr(db, lan966x, FDMA_INTR_DB);

		napi_schedule(&lan966x->napi);
	}

	if (err) {
		err_type = lan_rd(lan966x, FDMA_ERRORS);

		WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

		lan_wr(err, lan966x, FDMA_INTR_ERR);
		lan_wr(err_type, lan966x, FDMA_ERRORS);
	}

	return IRQ_HANDLED;
}
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
	struct lan966x_tx_dcb_buf *dcb_buf;
	int i;

	for (i = 0; i < FDMA_DCB_MAX; ++i) {
		dcb_buf = &tx->dcbs_buf[i];
		if (!dcb_buf->used && i != tx->last_in_use)
			return i;
	}

	return -1;
}
static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
				      int next_to_use, int len,
				      dma_addr_t dma_addr)
{
	struct lan966x_tx_dcb *next_dcb;
	struct lan966x_db *next_db;

	next_dcb = &tx->dcbs[next_to_use];
	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

	next_db = &next_dcb->db[0];
	next_db->dataptr = dma_addr;
	next_db->status = FDMA_DCB_STATUS_SOF |
			  FDMA_DCB_STATUS_EOF |
			  FDMA_DCB_STATUS_INTR |
			  FDMA_DCB_STATUS_BLOCKO(0) |
			  FDMA_DCB_STATUS_BLOCKL(len);
}
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
	struct lan966x *lan966x = tx->lan966x;
	struct lan966x_tx_dcb *dcb;

	if (likely(lan966x->tx.activated)) {
		/* Connect current dcb to the next db */
		dcb = &tx->dcbs[tx->last_in_use];
		dcb->nextptr = tx->dma + (next_to_use *
					  sizeof(struct lan966x_tx_dcb));

		lan966x_fdma_tx_reload(tx);
	} else {
		/* Because it is the first time, just activate */
		lan966x->tx.activated = true;
		lan966x_fdma_tx_activate(tx);
	}

	/* Move to the next dcb because this one is now the last in use */
	tx->last_in_use = next_to_use;
}
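/* Transmit an XDP frame. For ndo_xdp_xmit (dma_map == true) the frame
 * is mapped here; for XDP_TX the page pool mapping is reused and only
 * synced for the device. In both cases an IFH is prepended.
 */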
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
			   struct xdp_frame *xdpf,
			   struct page *page,
			   bool dma_map)
{
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	dma_addr_t dma_addr;
	int next_to_use;
	__be32 *ifh;
	int ret = 0;

	spin_lock(&lan966x->tx_lock);

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(port->dev);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Generate new IFH */
	if (dma_map) {
		if (xdpf->headroom < IFH_LEN_BYTES) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		ifh = xdpf->data - IFH_LEN_BYTES;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(lan966x->dev, dma_addr)) {
			ret = NETDEV_TX_OK;
			goto out;
		}

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr);
	} else {
		ifh = page_address(page) + XDP_PACKET_HEADROOM;
		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
		lan966x_ifh_set_bypass(ifh, 1);
		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   xdpf->len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);

		/* Setup next dcb */
		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
					  xdpf->len + IFH_LEN_BYTES,
					  dma_addr + XDP_PACKET_HEADROOM);
	}

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = false;
	next_dcb_buf->data.xdpf = xdpf;
	next_dcb_buf->xdp_ndo = dma_map;
	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = port->dev;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

out:
	spin_unlock(&lan966x->tx_lock);

	return ret;
}
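/* Transmit an skb: prepend the IFH, make room for the FCS, map the
 * buffer and queue it on the next free DCB.
 */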
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct lan966x_tx_dcb_buf *next_dcb_buf;
	struct lan966x_tx *tx = &lan966x->tx;
	int needed_headroom;
	int needed_tailroom;
	dma_addr_t dma_addr;
	int next_to_use;
	int err;

	/* Get next index */
	next_to_use = lan966x_fdma_get_next_dcb(tx);
	if (next_to_use < 0) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* skb processing */
	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
				       GFP_ATOMIC);
		if (unlikely(err)) {
			dev->stats.tx_dropped++;
			err = NETDEV_TX_OK;
			goto release;
		}
	}

	skb_tx_timestamp(skb);
	skb_push(skb, IFH_LEN_BYTES);
	memcpy(skb->data, ifh, IFH_LEN_BYTES);
	skb_put(skb, 4);

	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(lan966x->dev, dma_addr)) {
		dev->stats.tx_dropped++;
		err = NETDEV_TX_OK;
		goto release;
	}

	/* Setup next dcb */
	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

	/* Fill up the buffer */
	next_dcb_buf = &tx->dcbs_buf[next_to_use];
	next_dcb_buf->use_skb = true;
	next_dcb_buf->data.skb = skb;
	next_dcb_buf->xdp_ndo = false;
	next_dcb_buf->len = skb->len;
	next_dcb_buf->dma_addr = dma_addr;
	next_dcb_buf->used = true;
	next_dcb_buf->ptp = false;
	next_dcb_buf->dev = dev;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		next_dcb_buf->ptp = true;

	/* Start the transmission */
	lan966x_fdma_tx_start(tx, next_to_use);

	return NETDEV_TX_OK;

release:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		lan966x_ptp_txtstamp_release(port, skb);

	dev_kfree_skb_any(skb);
	return err;
}
static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
	int max_mtu = 0;
	int i;

	for (i = 0; i < lan966x->num_phys_ports; ++i) {
		struct lan966x_port *port;
		int mtu;

		port = lan966x->ports[i];
		if (!port)
			continue;

		mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
		if (mtu > max_mtu)
			max_mtu = mtu;
	}

	return max_mtu;
}
static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}
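/* Swap in a new RX ring sized for @new_mtu while the FDMA is stopped,
 * then free the old DCBs and page pool; on failure restore the old ring.
 */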
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
	struct page_pool *page_pool;
	dma_addr_t rx_dma;
	void *rx_dcbs;
	u32 size;
	int err;

	/* Store these for later to free them */
	rx_dma = lan966x->rx.dma;
	rx_dcbs = lan966x->rx.dcbs;
	page_pool = lan966x->rx.page_pool;

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);
	lan966x_fdma_stop_netdev(lan966x);

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
	lan966x->rx.max_mtu = new_mtu;
	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		goto restore;
	lan966x_fdma_rx_start(&lan966x->rx);

	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
	size = ALIGN(size, PAGE_SIZE);
	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

	page_pool_destroy(page_pool);

	lan966x_fdma_wakeup_netdev(lan966x);
	napi_enable(&lan966x->napi);

	return err;
restore:
	lan966x->rx.page_pool = page_pool;
	lan966x->rx.dma = rx_dma;
	lan966x->rx.dcbs = rx_dcbs;
	lan966x_fdma_rx_start(&lan966x->rx);

	return err;
}
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
	return lan966x_fdma_get_max_mtu(lan966x) +
	       IFH_LEN_BYTES +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       VLAN_HLEN * 2 +
	       XDP_PACKET_HEADROOM;
}
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
	int err;
	u32 val;

	/* Disable the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	/* Flush the CPU queues */
	readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
			   val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
			   READL_SLEEP_US, READL_TIMEOUT_US);

	/* Add a sleep in case there are frames between the queues and the CPU
	 * port
	 */
	usleep_range(1000, 2000);

	err = lan966x_fdma_reload(lan966x, max_mtu);

	/* Enable back the CPU port */
	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
		QSYS_SW_PORT_MODE_PORT_ENA,
		lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

	return err;
}
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	if (max_mtu == lan966x->rx.max_mtu)
		return 0;

	return __lan966x_fdma_reload(lan966x, max_mtu);
}
int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
	int max_mtu;

	max_mtu = lan966x_fdma_get_max_frame(lan966x);
	return __lan966x_fdma_reload(lan966x, max_mtu);
}
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev)
		return;

	lan966x->fdma_ndev = dev;
	netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
	napi_enable(&lan966x->napi);
}
void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
	if (lan966x->fdma_ndev == dev) {
		netif_napi_del(&lan966x->napi);
		lan966x->fdma_ndev = NULL;
	}
}
int lan966x_fdma_init(struct lan966x *lan966x)
{
	int err;

	if (!lan966x->fdma)
		return 0;

	lan966x->rx.lan966x = lan966x;
	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
	lan966x->tx.lan966x = lan966x;
	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
	lan966x->tx.last_in_use = -1;

	err = lan966x_fdma_rx_alloc(&lan966x->rx);
	if (err)
		return err;

	err = lan966x_fdma_tx_alloc(&lan966x->tx);
	if (err) {
		lan966x_fdma_rx_free(&lan966x->rx);
		return err;
	}

	lan966x_fdma_rx_start(&lan966x->rx);

	return 0;
}
void lan966x_fdma_deinit(struct lan966x *lan966x)
{
	if (!lan966x->fdma)
		return;

	lan966x_fdma_rx_disable(&lan966x->rx);
	lan966x_fdma_tx_disable(&lan966x->tx);

	napi_synchronize(&lan966x->napi);
	napi_disable(&lan966x->napi);

	lan966x_fdma_rx_free_pages(&lan966x->rx);
	lan966x_fdma_rx_free(&lan966x->rx);
	page_pool_destroy(lan966x->rx.page_pool);
	lan966x_fdma_tx_free(&lan966x->tx);
}