// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2022 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tso.h>
#include <net/xdp_sock_drv.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

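/* Probe for optional firmware features. The method used for one-step
 * PTP timestamp configuration depends on the DPNI firmware version:
 * recent MC firmware exposes the single-step register for direct
 * access, while older versions need an indirect update through a
 * firmware command (see the two update helpers below).
 */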
static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
{
	priv->features = 0;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
				   DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
		priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
}

static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
					      u32 offset, u8 udp)
{
	struct dpni_single_step_cfg cfg;

	cfg.en = 1;
	cfg.ch_update = udp;
	cfg.offset = offset;
	cfg.peer_delay = 0;

	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
		WARN_ONCE(1, "Failed to set single step register");
}

static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
					    u32 offset, u8 udp)
{
	u32 val = 0;

	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
	      DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);

	if (udp)
		val |= DPAA2_PTP_SINGLE_STEP_CH;

	if (priv->onestep_reg_base)
		writel(val, priv->onestep_reg_base);
}

static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_single_step_cfg ptp_cfg;

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;

	if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
		return;

	if (dpni_get_single_step_cfg(priv->mc_io, 0,
				     priv->mc_token, &ptp_cfg)) {
		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
		return;
	}

	if (!ptp_cfg.ptp_onestep_reg_base) {
		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
		return;
	}

	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
					 sizeof(u32));
	if (!priv->onestep_reg_base) {
		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
		return;
	}

	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
}

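/* Translate an I/O virtual address seen by WRIOP into a CPU virtual
 * address. Without an IOMMU domain, the IOVA already is a physical
 * address and can be converted directly.
 */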
void *dpaa2_iova_to_virt(struct iommu_domain *domain,
			 dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count, bool xsk_zc)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_swa *swa;
	struct xdp_buff *xdp_buff;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);

		if (!xsk_zc) {
			dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			free_pages((unsigned long)vaddr, 0);
		} else {
			swa = (struct dpaa2_eth_swa *)
				(vaddr + DPAA2_ETH_RX_HWA_SIZE);
			xdp_buff = swa->xsk.xdp_buff;
			xsk_buff_free(xdp_buff);
		}
	}
}

void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
					       ch->recycled_bufs,
					       ch->recycled_bufs_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
				    ch->recycled_bufs_cnt, ch->xsk_zc);
		ch->buf_count -= ch->recycled_bufs_cnt;
	}

	ch->recycled_bufs_cnt = 0;
}

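/* Enqueue a batch of XDP frame descriptors to a Tx frame queue,
 * retrying for a bounded number of times while the portal is busy.
 * Returns how many FDs were actually enqueued; the caller deals with
 * the leftover ones.
 */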
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* Enqueue the array of XDP_TX frames */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

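/* Queue an XDP_TX frame for transmission. Frames are batched per Tx
 * queue and flushed to hardware once DEV_MAP_BULK_SIZE descriptors
 * have accumulated, amortizing the enqueue cost.
 */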
void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   struct dpaa2_fd *fd,
			   void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

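/* Run the attached XDP program on a received frame and act on its
 * verdict: pass the frame up the stack, transmit it back (XDP_TX),
 * redirect it to another device, or drop it back into the buffer pool.
 */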
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err, offset;

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
			 dpaa2_fd_get_len(fd), false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_recycle_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				ch->buf_count++;
				dpaa2_eth_recycle_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	return xdp_act;
}

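/* Copybreak path: for frames short enough it is cheaper to copy the
 * payload into a freshly allocated skb and recycle the hardware buffer
 * right away than to unmap the buffer and build an skb around it.
 */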
struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
				    struct dpaa2_eth_channel *ch,
				    const struct dpaa2_fd *fd, u32 fd_length,
				    void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}

static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	u32 fd_length = dpaa2_fd_get_len(fd);

	if (fd_length > priv->rx_copybreak)
		return NULL;

	return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
}

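/* Finalize a received skb: fill in the hardware timestamp if Rx
 * timestamping is enabled, validate the checksum, then queue the skb
 * on the channel's Rx list so it is passed to the stack at the end of
 * the NAPI poll.
 */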
void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
			   struct dpaa2_eth_channel *ch,
			   const struct dpaa2_fd *fd, void *vaddr,
			   struct dpaa2_eth_fq *fq,
			   struct rtnl_link_stats64 *percpu_stats,
			   struct sk_buff *skb)
{
	struct dpaa2_fas *fas;
	u32 status = 0;

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);
}

/* Main Rx frame processing routine */
void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
		  struct dpaa2_eth_channel *ch,
		  const struct dpaa2_fd *fd,
		  struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	void *buf_data;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
		if (!skb) {
			dma_unmap_page(dev, addr, priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;
	ch->stats.frames_per_cdan += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		if (priv->ptp_correction_off == offset1)
			return;

		priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
		priv->ptp_correction_off = offset1;
	}
}

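/* Get a scatter-gather table buffer, preferably from the per-CPU
 * cache; fall back to a fresh allocation when the cache is empty.
 * The returned buffer is zeroed out.
 */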
void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	void *sgt_buf = NULL;
	int sgt_buf_size;

	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset +
		DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (!sgt_buf)
		return NULL;

	memset(sgt_buf, 0, sgt_buf_size);

	return sgt_buf;
}

void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;

	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		skb_free_frag(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -EINVAL;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = dpaa2_eth_sgt_get(priv);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	memset(fd, 0, sizeof(struct dpaa2_fd));
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
	sgt_buf = dpaa2_eth_sgt_get(priv);
	if (unlikely(!sgt_buf))
		return -ENOMEM;
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	memset(fd, 0, sizeof(struct dpaa2_fd));
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	dpaa2_eth_sgt_recycle(priv, sgt_buf);

	return err;
}

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * upon Tx confirmation
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	memset(fd, 0, sizeof(struct dpaa2_fd));
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
			  struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq *fq,
			  const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);
	struct dpaa2_sg_entry *sgt;
	int should_free_skb = 1;
	void *tso_hdr;
	int i;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
			skb = swa->tso.skb;

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
					 DMA_BIDIRECTIONAL);

			/* Unmap and free the header */
			tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
			dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
					 DMA_TO_DEVICE);
			kfree(tso_hdr);

			/* Unmap the other SG entries for the data */
			for (i = 1; i < swa->tso.num_sg; i++)
				dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
						 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);

			if (!swa->tso.is_last_fd)
				should_free_skb = 0;
		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type == DPAA2_ETH_SWA_XSK) {
		ch->xsk_tx_pkts_sent++;
		dpaa2_eth_sgt_recycle(priv, buffer_start);
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
		if (skb->cb[0] == TX_TSTAMP) {
			struct skb_shared_hwtstamps shhwtstamps;
			__le64 *ts = dpaa2_get_ts(buffer_start, true);
			u64 ns;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));

			ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
			mutex_unlock(&priv->onestep_tstamp_lock);
		}
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		dpaa2_eth_sgt_recycle(priv, buffer_start);

	/* Move on with skb release. If we are just confirming multiple FDs
	 * from the same TSO skb then only the last one will need to free the
	 * skb.
	 */
	if (should_free_skb)
		napi_consume_skb(skb, in_napi);
}

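/* Software TSO: segment a GSO skb with the kernel tso_* helpers.
 * Each resulting segment gets its own SG frame descriptor, with the
 * rebuilt MAC/IP/TCP header in the first SG entry followed by the
 * payload fragments.
 */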
static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
				  struct sk_buff *skb, struct dpaa2_fd *fd,
				  int *num_fds, u32 *total_fds_len)
{
	struct device *dev = priv->net_dev->dev.parent;
	int hdr_len, total_len, data_left, fd_len;
	int num_sge, err, i, sgt_buf_size;
	struct dpaa2_fd *fd_start = fd;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t sgt_addr, addr;
	dma_addr_t tso_hdr_dma;
	unsigned int index = 0;
	struct tso_t tso;
	void *sgt_buf;
	void *tso_hdr;

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);
	*total_fds_len = 0;

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		/* Prepare the HW SGT structure for this frame */
		sgt_buf = dpaa2_eth_sgt_get(priv);
		if (unlikely(!sgt_buf)) {
			netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
			err = -ENOMEM;
			goto err_sgt_get;
		}
		sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

		/* Determine the data length of this frame */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		fd_len = data_left + hdr_len;

		/* Prepare packet headers: MAC + IP + TCP */
		tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
		if (!tso_hdr) {
			err = -ENOMEM;
			goto err_alloc_tso_hdr;
		}

		tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
		tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tso_hdr_dma)) {
			netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
			err = -ENOMEM;
			goto err_map_tso_hdr;
		}

		/* Setup the SG entry for the header */
		dpaa2_sg_set_addr(sgt, tso_hdr_dma);
		dpaa2_sg_set_len(sgt, hdr_len);
		dpaa2_sg_set_final(sgt, data_left <= 0);

		/* Compose the SG entries for each fragment of data */
		num_sge = 1;
		while (data_left > 0) {
			int size;

			/* Move to the next SG entry */
			sgt++;
			size = min_t(int, tso.size, data_left);

			addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addr)) {
				netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
				err = -ENOMEM;
				goto err_map_data;
			}
			dpaa2_sg_set_addr(sgt, addr);
			dpaa2_sg_set_len(sgt, size);
			dpaa2_sg_set_final(sgt, size == data_left);
			num_sge++;

			/* Build the data for the __next__ fragment */
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* Store the skb backpointer in the SGT buffer */
		sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
		swa = (struct dpaa2_eth_swa *)sgt_buf;
		swa->type = DPAA2_ETH_SWA_SW_TSO;
		swa->tso.skb = skb;
		swa->tso.num_sg = num_sge;
		swa->tso.sgt_size = sgt_buf_size;
		swa->tso.is_last_fd = total_len == 0 ? 1 : 0;

		/* Separately map the SGT buffer */
		sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, sgt_addr))) {
			netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
			err = -ENOMEM;
			goto err_map_sgt;
		}

		/* Setup the frame descriptor */
		memset(fd, 0, sizeof(struct dpaa2_fd));
		dpaa2_fd_set_offset(fd, priv->tx_data_offset);
		dpaa2_fd_set_format(fd, dpaa2_fd_sg);
		dpaa2_fd_set_addr(fd, sgt_addr);
		dpaa2_fd_set_len(fd, fd_len);
		dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

		*total_fds_len += fd_len;
		/* Advance to the next frame descriptor */
		fd++;
		index++;
	}

	*num_fds = index;

	return 0;

err_map_sgt:
err_map_data:
	/* Unmap all the data S/G entries for the current FD */
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
	for (i = 1; i < num_sge; i++)
		dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
				 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);

	/* Unmap the header entry */
	dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
err_map_tso_hdr:
	kfree(tso_hdr);
err_alloc_tso_hdr:
	dpaa2_eth_sgt_recycle(priv, sgt_buf);
err_sgt_get:
	/* Free all the other FDs that were already fully created */
	for (i = 0; i < index; i++)
		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);

	return err;
}

static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct rtnl_link_stats64 *percpu_stats;
	unsigned int needed_headroom;
	int num_fds = 1, max_retries;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	struct dpaa2_fd *fd;
	u16 queue_mapping;
	void *swa = NULL;
	u8 prio = 0;
	int err, i;
	u32 fd_len;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	fd = (this_cpu_ptr(priv->fd))->array;

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */

	if (skb_is_gso(skb)) {
		err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
		percpu_extras->tx_sg_frames += num_fds;
		percpu_extras->tx_sg_bytes += fd_len;
		percpu_extras->tx_tso_frames += num_fds;
		percpu_extras->tx_tso_bytes += fd_len;
	} else if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		fd_len = dpaa2_fd_get_len(fd);
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
		fd_len = dpaa2_fd_get_len(fd);
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
		fd_len = dpaa2_fd_get_len(fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (swa && skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);

	/* Tracing point */
	for (i = 0; i < num_fds; i++)
		trace_dpaa2_tx_fd(net_dev, &fd[i]);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fd[total_enqueued],
				    prio, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			retries++;
			continue;
		}
		total_enqueued += enqueued;
	}
	percpu_extras->tx_portal_busy += retries;

	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets += total_enqueued;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

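/* Worker that transmits the queued one-step PTP Sync packets one at a
 * time, serialized by the one-step timestamp mutex, so that only one
 * such packet is in flight at any moment.
 */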
static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before transmitting a one-step timestamping
		 * packet; the lock is released in dpaa2_eth_free_tx_fd once
		 * we confirm the packet has been sent on hardware, or when
		 * cleaning up after a transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;
	ch->stats.bytes_per_cdan += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch)
{
	struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct dpaa2_eth_swa *swa;
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i = 0, err;
	u32 batch;

	/* Allocate buffers visible to WRIOP */
	if (!ch->xsk_zc) {
		for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
			/* Also allocate skb shared info and alignment padding.
			 * There is one page for each Rx buffer. WRIOP sees
			 * the entire page except for a tailroom reserved for
			 * skb shared info
			 */
			page = dev_alloc_pages(0);
			if (!page)
				goto err_alloc;

			addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
					    DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, addr)))
				goto err_map;

			buf_array[i] = addr;

			/* tracing point */
			trace_dpaa2_eth_buf_seed(priv->net_dev,
						 page_address(page),
						 DPAA2_ETH_RX_BUF_RAW_SIZE,
						 addr, priv->rx_buf_size,
						 ch->bp->bpid);
		}
	} else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
		/* Allocate XSK buffers for AF_XDP fast path in batches
		 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
		 * provide enough buffers at the moment
		 */
		batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
					     DPAA2_ETH_BUFS_PER_CMD);
		if (!batch)
			goto err_alloc;

		for (i = 0; i < batch; i++) {
			swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
						       DPAA2_ETH_RX_HWA_SIZE);
			swa->xsk.xdp_buff = xdp_buffs[i];

			addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
			if (unlikely(dma_mapping_error(dev, addr)))
				goto err_map;

			buf_array[i] = addr;

			trace_dpaa2_xsk_buf_seed(priv->net_dev,
						 xdp_buffs[i]->data_hard_start,
						 DPAA2_ETH_RX_BUF_RAW_SIZE,
						 addr, priv->rx_buf_size,
						 ch->bp->bpid);
		}
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
		return 0;
	}

	return i;

err_map:
	if (!ch->xsk_zc) {
		__free_pages(page, 0);
	} else {
		for (; i < batch; i++)
			xsk_buff_free(xdp_buffs[i]);
	}
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_channel *ch)
{
	int new_count;
	int i;

	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
		new_count = dpaa2_eth_add_bufs(priv, ch);
		ch->buf_count += new_count;

		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
			return -ENOMEM;
	}

	return 0;
}

static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct dpaa2_eth_channel *channel;
	int i, err = 0;

	for (i = 0; i < priv->num_channels; i++) {
		channel = priv->channel[i];

		err = dpaa2_eth_seed_pool(priv, channel);
		if (err) {
			/* Not much to do; the buffer pool, though not filled up,
			 * may still contain some buffers which would enable us
			 * to limp on.
			 */
			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
				   channel->bp->dev->obj_desc.id,
				   channel->bp->bpid);
		}
	}
}

/* Drain the specified number of buffers from one of the DPNI's private buffer
 * pools.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
				 int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	bool xsk_zc = false;
	int retries = 0;
	int i, ret;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp->bpid == bpid)
			xsk_zc = priv->channel[i]->xsk_zc;

	do {
		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
	int i;

	/* Drain the buffer pool */
	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, bpid, 1);

	/* Reset the buffer count of all channels which were
	 * using this buffer pool.
	 */
	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->bp->bpid == bpid)
			priv->channel[i]->buf_count = 0;
}

static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_bps; i++)
		dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

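/* Free all the SGT buffers cached per CPU; called on the interface
 * stop path, when the cached buffers are no longer needed.
 */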
static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			skb_free_frag(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}

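/* Issue a volatile dequeue command to pull pending frames from the
 * channel into the software store, retrying while the portal is busy.
 */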
static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	bool work_done_zc = false;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	if (ch->xsk_zc) {
		work_done_zc = dpaa2_xsk_tx(priv, ch);
		/* If we reached the XSK Tx per NAPI threshold, we're done */
		if (work_done_zc) {
			work_done = budget;
			goto out;
		}
	}

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* Update NET DIM with the values for this CDAN */
	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
				ch->stats.bytes_per_cdan);
	ch->stats.frames_per_cdan = 0;
	ch->stats.bytes_per_cdan = 0;

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (ch->xsk_tx_pkts_sent) {
		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
		ch->xsk_tx_pkts_sent = 0;
	}

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}

static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || pfc;
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 * We can avoid locking because we are called from the "link changed"
	 * IRQ handler, which is the same as the "endpoint changed" IRQ handler
	 * (the writer to priv->mac), so we cannot race with it.
	 */
	if (dpaa2_mac_is_type_phy(priv->mac))
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	dpaa2_eth_seed_pools(priv);

	mutex_lock(&priv->mac_lock);

	if (!dpaa2_eth_is_type_phy(priv)) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	dpaa2_eth_enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		mutex_unlock(&priv->mac_lock);
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (dpaa2_eth_is_type_phy(priv))
		dpaa2_mac_start(priv->mac);

	mutex_unlock(&priv->mac_lock);

	return 0;

enable_err:
	dpaa2_eth_disable_ch_napi(priv);
	dpaa2_eth_drain_pools(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = dpaa2_eth_ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	mutex_lock(&priv->mac_lock);

	if (dpaa2_eth_is_type_phy(priv)) {
		dpaa2_mac_stop(priv->mac);
	} else {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	}

	mutex_unlock(&priv->mac_lock);

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	dpaa2_eth_wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	dpaa2_eth_wait_for_ingress_fq_empty(priv);
	dpaa2_eth_disable_ch_napi(priv);

	/* Empty the buffer pool */
	dpaa2_eth_drain_pools(priv);

	/* Empty the Scatter-Gather Buffer cache */
	dpaa2_eth_sgt_cache_drain(priv);

	return 0;
}

2335 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2337 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2338 struct device *dev = net_dev->dev.parent;
2341 err = eth_mac_addr(net_dev, addr);
2343 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2347 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2350 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2357 /* Fill in counters maintained by the GPP driver. These may be different from
2358 * the hardware counters obtained by ethtool.
2360 static void dpaa2_eth_get_stats(struct net_device *net_dev,
2361 struct rtnl_link_stats64 *stats)
2363 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2364 struct rtnl_link_stats64 *percpu_stats;
2366 u64 *netstats = (u64 *)stats;
2368 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2370 for_each_possible_cpu(i) {
2371 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2372 cpustats = (u64 *)percpu_stats;
2373 for (j = 0; j < num; j++)
2374 netstats[j] += cpustats[j];
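/* Illustrative sketch (not part of the driver): the loop above works only
 * because struct rtnl_link_stats64 consists solely of u64 fields, so each
 * per-CPU copy can be folded in as a flat u64 array. A minimal user-space
 * analogue, with a hypothetical all-u64 stats struct standing in:
 */
#include <stdint.h>
#include <stddef.h>

struct toy_stats {			/* hypothetical stand-in struct */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

static void toy_stats_sum(struct toy_stats *total,
			  const struct toy_stats *percpu, int ncpus)
{
	uint64_t *dst = (uint64_t *)total;
	size_t nfields = sizeof(*total) / sizeof(uint64_t);

	for (int c = 0; c < ncpus; c++) {
		const uint64_t *src = (const uint64_t *)&percpu[c];

		for (size_t f = 0; f < nfields; f++)
			dst[f] += src[f];
	}
}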
2378 /* Copy mac unicast addresses from @net_dev to @priv.
2379 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2381 static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2382 struct dpaa2_eth_priv *priv)
2384 struct netdev_hw_addr *ha;
2387 netdev_for_each_uc_addr(ha, net_dev) {
2388 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2391 netdev_warn(priv->net_dev,
2392 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2397 /* Copy mac multicast addresses from @net_dev to @priv
2398 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2400 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2401 struct dpaa2_eth_priv *priv)
2403 struct netdev_hw_addr *ha;
2406 netdev_for_each_mc_addr(ha, net_dev) {
2407 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2410 netdev_warn(priv->net_dev,
2411 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2416 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2417 __be16 vlan_proto, u16 vid)
2419 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2422 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2426 netdev_warn(priv->net_dev,
2427 "Could not add the vlan id %u\n",
2435 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2436 __be16 vlan_proto, u16 vid)
2438 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2441 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2444 netdev_warn(priv->net_dev,
2445 "Could not remove the vlan id %u\n",
2453 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2455 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2456 int uc_count = netdev_uc_count(net_dev);
2457 int mc_count = netdev_mc_count(net_dev);
2458 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2459 u32 options = priv->dpni_attrs.options;
2460 u16 mc_token = priv->mc_token;
2461 struct fsl_mc_io *mc_io = priv->mc_io;
2464 /* Basic sanity checks; these probably indicate a misconfiguration */
2465 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2466 netdev_info(net_dev,
2467 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2470 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2471 if (uc_count > max_mac) {
2472 netdev_info(net_dev,
2473 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2477 if (mc_count + uc_count > max_mac) {
2478 netdev_info(net_dev,
2479 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2480 uc_count + mc_count, max_mac);
2481 goto force_mc_promisc;
2484 /* Adjust promisc settings due to flag combinations */
2485 if (net_dev->flags & IFF_PROMISC)
2487 if (net_dev->flags & IFF_ALLMULTI) {
2488 /* First, rebuild unicast filtering table. This should be done
2489 * in promisc mode, in order to avoid frame loss while we
2490 * progressively add entries to the table.
2491 * We don't know whether we had been in promisc already, and
2492 * making an MC call to find out is expensive; so set uc promisc
2495 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2497 netdev_warn(net_dev, "Can't set uc promisc\n");
2499 /* Actual uc table reconstruction. */
2500 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2502 netdev_warn(net_dev, "Can't clear uc filters\n");
2503 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2505 /* Finally, clear uc promisc and set mc promisc as requested. */
2506 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2508 netdev_warn(net_dev, "Can't clear uc promisc\n");
2509 goto force_mc_promisc;
2512 /* Neither unicast nor multicast promisc will be on... eventually.
2513 * For now, rebuild the MAC filtering tables while forcing both of them on.
2515 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2517 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2518 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2520 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2522 /* Actual mac filtering tables reconstruction */
2523 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2525 netdev_warn(net_dev, "Can't clear mac filters\n");
2526 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2527 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2529 /* Now we can clear both ucast and mcast promisc, without risking
2530 * to drop legitimate frames anymore.
2532 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2534 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2535 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2537 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2542 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2544 netdev_warn(net_dev, "Can't set ucast promisc\n");
2546 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2548 netdev_warn(net_dev, "Can't set mcast promisc\n");
2551 static int dpaa2_eth_set_features(struct net_device *net_dev,
2552 netdev_features_t features)
2554 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2555 netdev_features_t changed = features ^ net_dev->features;
2559 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2560 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2561 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2566 if (changed & NETIF_F_RXCSUM) {
2567 enable = !!(features & NETIF_F_RXCSUM);
2568 err = dpaa2_eth_set_rx_csum(priv, enable);
2573 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2574 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2575 err = dpaa2_eth_set_tx_csum(priv, enable);
2583 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2585 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2586 struct hwtstamp_config config;
2591 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2594 switch (config.tx_type) {
2595 case HWTSTAMP_TX_OFF:
2596 case HWTSTAMP_TX_ON:
2597 case HWTSTAMP_TX_ONESTEP_SYNC:
2598 priv->tx_tstamp_type = config.tx_type;
2604 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2605 priv->rx_tstamp = false;
2607 priv->rx_tstamp = true;
2608 /* TS is set for all frame types, not only those requested */
2609 config.rx_filter = HWTSTAMP_FILTER_ALL;
2612 if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2613 dpaa2_ptp_onestep_reg_update_method(priv);
2615 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
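/* Illustrative sketch (not part of the driver): requesting the timestamping
 * modes handled above from user space, via the standard SIOCSHWTSTAMP ioctl.
 * The interface name "eth0" and the pre-opened AF_INET socket are assumptions.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamping(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ONESTEP_SYNC,	/* one of the types accepted above */
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On return the driver may have rewritten cfg.rx_filter */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}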
2619 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2621 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2624 if (cmd == SIOCSHWTSTAMP)
2625 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2627 mutex_lock(&priv->mac_lock);
2629 if (dpaa2_eth_is_type_phy(priv)) {
2630 err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2631 mutex_unlock(&priv->mac_lock);
2635 mutex_unlock(&priv->mac_lock);
2640 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2642 int mfl, linear_mfl;
2644 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2645 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2646 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
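	/* Worked example (all numbers assumed purely for illustration): with
	 * rx_buf_size = 2048, DPAA2_ETH_RX_HWA_SIZE = 64, an Rx headroom of
	 * 256 and XDP_PACKET_HEADROOM = 256, linear_mfl would be
	 * 2048 - 64 - 256 - 256 = 1472, so the largest XDP-capable MTU
	 * reported below would be 1472 - VLAN_ETH_HLEN (18) = 1454 bytes.
	 */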
2648 if (mfl > linear_mfl) {
2649 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2650 linear_mfl - VLAN_ETH_HLEN);
2657 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2661 /* We enforce a maximum Rx frame length based on MTU only if we have
2662 * an XDP program attached (in order to avoid Rx S/G frames).
2663 * Otherwise, we accept all incoming frames as long as they are not
2664 * larger than the maximum size supported in hardware.
2667 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2669 mfl = DPAA2_ETH_MFL;
2671 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2673 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2680 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2682 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2685 if (!priv->xdp_prog)
2688 if (!xdp_mtu_valid(priv, new_mtu))
2691 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2700 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2702 struct dpni_buffer_layout buf_layout = {0};
2705 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2706 DPNI_QUEUE_RX, &buf_layout);
2708 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2712 /* Reserve extra headroom for XDP header size changes */
2713 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2714 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2715 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2716 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2717 DPNI_QUEUE_RX, &buf_layout);
2719 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2726 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2728 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2729 struct dpaa2_eth_channel *ch;
2730 struct bpf_prog *old;
2731 bool up, need_update;
2734 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2738 bpf_prog_add(prog, priv->num_channels);
2740 up = netif_running(dev);
2741 need_update = (!!priv->xdp_prog != !!prog);
2746 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2747 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2748 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2749 * so we are sure no old format buffers will be used from now on.
2752 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2755 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2760 old = xchg(&priv->xdp_prog, prog);
2764 for (i = 0; i < priv->num_channels; i++) {
2765 ch = priv->channel[i];
2766 old = xchg(&ch->xdp.prog, prog);
2772 err = dev_open(dev, NULL);
2781 bpf_prog_sub(prog, priv->num_channels);
2783 dev_open(dev, NULL);
2788 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2790 switch (xdp->command) {
2791 case XDP_SETUP_PROG:
2792 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2793 case XDP_SETUP_XSK_POOL:
2794 return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
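/* Illustrative sketch (not part of the driver): attaching an XDP program
 * from user space with libbpf, which lands in the XDP_SETUP_PROG case above.
 * The object file name "xdp_prog.o" and interface "eth0" are assumptions.
 */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_xdp_prog(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");

	if (!obj || !ifindex)
		return -1;
	if (bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;

	/* Kernel side, dpaa2_eth_setup_xdp() validates the MTU and swaps
	 * the program in on every channel.
	 */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}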
2802 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2803 struct xdp_frame *xdpf,
2804 struct dpaa2_fd *fd)
2806 struct device *dev = net_dev->dev.parent;
2807 unsigned int needed_headroom;
2808 struct dpaa2_eth_swa *swa;
2809 void *buffer_start, *aligned_start;
2812 /* We require a minimum headroom to be able to transmit the frame.
2813 * Otherwise return an error and let the original net_device handle it
2815 needed_headroom = dpaa2_eth_needed_headroom(NULL);
2816 if (xdpf->headroom < needed_headroom)
2819 /* Setup the FD fields */
2820 memset(fd, 0, sizeof(*fd));
2822 /* Align FD address, if possible */
2823 buffer_start = xdpf->data - needed_headroom;
2824 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2825 DPAA2_ETH_TX_BUF_ALIGN);
2826 if (aligned_start >= xdpf->data - xdpf->headroom)
2827 buffer_start = aligned_start;
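	/* Note on the trick above: PTR_ALIGN() rounds up, so
	 * PTR_ALIGN(p - align, align) yields an aligned address in
	 * [p - align, p - 1]. E.g. with align = 64, p = 0x1010 rounds
	 * to 0x1000, i.e. the buffer start only ever moves backwards
	 * into the available headroom.
	 */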
2829 swa = (struct dpaa2_eth_swa *)buffer_start;
2830 /* fill in necessary fields here */
2831 swa->type = DPAA2_ETH_SWA_XDP;
2832 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2833 swa->xdp.xdpf = xdpf;
2835 addr = dma_map_single(dev, buffer_start,
2838 if (unlikely(dma_mapping_error(dev, addr)))
2841 dpaa2_fd_set_addr(fd, addr);
2842 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2843 dpaa2_fd_set_len(fd, xdpf->len);
2844 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2845 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2850 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2851 struct xdp_frame **frames, u32 flags)
2853 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2854 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2855 struct rtnl_link_stats64 *percpu_stats;
2856 struct dpaa2_eth_fq *fq;
2857 struct dpaa2_fd *fds;
2858 int enqueued, i, err;
2860 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2863 if (!netif_running(net_dev))
2866 fq = &priv->fq[smp_processor_id()];
2867 xdp_redirect_fds = &fq->xdp_redirect_fds;
2868 fds = xdp_redirect_fds->fds;
2870 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2872 /* create an FD for each xdp_frame in the list received */
2873 for (i = 0; i < n; i++) {
2874 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2878 xdp_redirect_fds->num = i;
2880 /* enqueue all the frame descriptors */
2881 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2883 /* update statistics */
2884 percpu_stats->tx_packets += enqueued;
2885 for (i = 0; i < enqueued; i++)
2886 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2891 static int update_xps(struct dpaa2_eth_priv *priv)
2893 struct net_device *net_dev = priv->net_dev;
2894 struct cpumask xps_mask;
2895 struct dpaa2_eth_fq *fq;
2896 int i, num_queues, netdev_queues;
2899 num_queues = dpaa2_eth_queue_count(priv);
2900 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2902 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2903 * queues, so only process those
2905 for (i = 0; i < netdev_queues; i++) {
2906 fq = &priv->fq[i % num_queues];
2908 cpumask_clear(&xps_mask);
2909 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2911 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2913 netdev_warn_once(net_dev, "Error setting XPS queue\n");
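	/* Worked example (queue counts assumed): with num_queues = 8 and
	 * num_tc = 2, netdev queues 0-15 are programmed, and queue i
	 * inherits the target CPU of fq[i % 8], so all TCs of a given flow
	 * stay on the same core.
	 */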
2921 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2922 struct tc_mqprio_qopt *mqprio)
2924 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2925 u8 num_tc, num_queues;
2928 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2929 num_queues = dpaa2_eth_queue_count(priv);
2930 num_tc = mqprio->num_tc;
2932 if (num_tc == net_dev->num_tc)
2935 if (num_tc > dpaa2_eth_tc_count(priv)) {
2936 netdev_err(net_dev, "Max %d traffic classes supported\n",
2937 dpaa2_eth_tc_count(priv));
2942 netdev_reset_tc(net_dev);
2943 netif_set_real_num_tx_queues(net_dev, num_queues);
2947 netdev_set_num_tc(net_dev, num_tc);
2948 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2950 for (i = 0; i < num_tc; i++)
2951 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
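	/* Worked example (counts assumed): with num_queues = 8 and
	 * num_tc = 2, 16 real Tx queues are exposed and
	 * netdev_set_tc_queue() maps TC 0 to queues 0-7 and TC 1 to
	 * queues 8-15.
	 */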
2959 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
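/* Worked example: a TBF rate of 125,000,000 bytes/s converts as
 * div_u64(125000000, 1000000) * 8 = 125 * 8 = 1000 Mbits/s. Note that the
 * divide-first order truncates the rate to a whole number of Mbytes/s
 * before scaling.
 */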
2961 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2963 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2964 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2965 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2966 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2969 if (p->command == TC_TBF_STATS)
2972 /* Only per port Tx shaping */
2973 if (p->parent != TC_H_ROOT)
2976 if (p->command == TC_TBF_REPLACE) {
2977 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2978 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2979 DPAA2_ETH_MAX_BURST_SIZE);
2983 tx_cr_shaper.max_burst_size = cfg->max_size;
2984 /* The TBF interface is in bytes/s, whereas DPAA2 expects the rate in Mbits/s. */
2987 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2990 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2993 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
3000 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
3001 enum tc_setup_type type, void *type_data)
3004 case TC_SETUP_QDISC_MQPRIO:
3005 return dpaa2_eth_setup_mqprio(net_dev, type_data);
3006 case TC_SETUP_QDISC_TBF:
3007 return dpaa2_eth_setup_tbf(net_dev, type_data);
3013 static const struct net_device_ops dpaa2_eth_ops = {
3014 .ndo_open = dpaa2_eth_open,
3015 .ndo_start_xmit = dpaa2_eth_tx,
3016 .ndo_stop = dpaa2_eth_stop,
3017 .ndo_set_mac_address = dpaa2_eth_set_addr,
3018 .ndo_get_stats64 = dpaa2_eth_get_stats,
3019 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
3020 .ndo_set_features = dpaa2_eth_set_features,
3021 .ndo_eth_ioctl = dpaa2_eth_ioctl,
3022 .ndo_change_mtu = dpaa2_eth_change_mtu,
3023 .ndo_bpf = dpaa2_eth_xdp,
3024 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3025 .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
3026 .ndo_setup_tc = dpaa2_eth_setup_tc,
3027 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
3028 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
3031 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3033 struct dpaa2_eth_channel *ch;
3035 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
3037 /* Update NAPI statistics */
3040 /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
3041 * NAPI poll so that it can be rescheduled again.
3043 if (!napi_if_scheduled_mark_missed(&ch->napi))
3044 napi_schedule(&ch->napi);
3047 /* Allocate and configure a DPCON object */
3048 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
3050 struct fsl_mc_device *dpcon;
3051 struct device *dev = priv->net_dev->dev.parent;
3054 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
3055 FSL_MC_POOL_DPCON, &dpcon);
3057 if (err == -ENXIO) {
3058 dev_dbg(dev, "Waiting for DPCON\n");
3059 err = -EPROBE_DEFER;
3061 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
3063 return ERR_PTR(err);
3066 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3068 dev_err(dev, "dpcon_open() failed\n");
3072 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3074 dev_err(dev, "dpcon_reset() failed\n");
3078 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3080 dev_err(dev, "dpcon_enable() failed\n");
3087 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3089 fsl_mc_object_free(dpcon);
3091 return ERR_PTR(err);
3094 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
3095 struct fsl_mc_device *dpcon)
3097 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
3098 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3099 fsl_mc_object_free(dpcon);
3102 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
3104 struct dpaa2_eth_channel *channel;
3105 struct dpcon_attr attr;
3106 struct device *dev = priv->net_dev->dev.parent;
3109 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
3113 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
3114 if (IS_ERR(channel->dpcon)) {
3115 err = PTR_ERR(channel->dpcon);
3119 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
3122 dev_err(dev, "dpcon_get_attributes() failed\n");
3126 channel->dpcon_id = attr.id;
3127 channel->ch_id = attr.qbman_ch_id;
3128 channel->priv = priv;
3133 dpaa2_eth_free_dpcon(priv, channel->dpcon);
3136 return ERR_PTR(err);
3139 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
3140 struct dpaa2_eth_channel *channel)
3142 dpaa2_eth_free_dpcon(priv, channel->dpcon);
3146 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
3147 * and register data availability notifications
3149 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3151 struct dpaa2_io_notification_ctx *nctx;
3152 struct dpaa2_eth_channel *channel;
3153 struct dpcon_notification_cfg dpcon_notif_cfg;
3154 struct device *dev = priv->net_dev->dev.parent;
3157 /* We want the ability to spread ingress traffic (RX, TX conf) to as
3158 * many cores as possible, so we need one channel for each core
3159 * (unless there are fewer queues than cores, in which case the extra
3160 * channels would be wasted).
3161 * Allocate one channel per core and register it to the core's
3162 * affine DPIO. If not enough channels are available for all cores
3163 * or if some cores don't have an affine DPIO, there will be no
3164 * ingress frame processing on those cores.
3166 cpumask_clear(&priv->dpio_cpumask);
3167 for_each_online_cpu(i) {
3168 /* Try to allocate a channel */
3169 channel = dpaa2_eth_alloc_channel(priv);
3170 if (IS_ERR_OR_NULL(channel)) {
3171 err = PTR_ERR_OR_ZERO(channel);
3172 if (err == -EPROBE_DEFER)
3173 dev_dbg(dev, "waiting for affine channel\n");
3176 "No affine channel for cpu %d and above\n", i);
3180 priv->channel[priv->num_channels] = channel;
3182 nctx = &channel->nctx;
3184 nctx->cb = dpaa2_eth_cdan_cb;
3185 nctx->id = channel->ch_id;
3186 nctx->desired_cpu = i;
3188 /* Register the new context */
3189 channel->dpio = dpaa2_io_service_select(i);
3190 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3192 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3193 /* If no affine DPIO for this core, there's probably
3194 * none available for next cores either. Signal we want
3195 * to retry later, in case the DPIO devices weren't probed yet. */
3198 err = -EPROBE_DEFER;
3199 goto err_service_reg;
3202 /* Register DPCON notification with MC */
3203 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3204 dpcon_notif_cfg.priority = 0;
3205 dpcon_notif_cfg.user_ctx = nctx->qman64;
3206 err = dpcon_set_notification(priv->mc_io, 0,
3207 channel->dpcon->mc_handle,
3210 dev_err(dev, "dpcon_set_notification() failed\n");
3214 /* If we managed to allocate a channel and also found an affine
3215 * DPIO for this core, add it to the final mask
3217 cpumask_set_cpu(i, &priv->dpio_cpumask);
3218 priv->num_channels++;
3220 /* Stop if we already have enough channels to accommodate all
3221 * RX and TX conf queues
3223 if (priv->num_channels == priv->dpni_attrs.num_queues)
3230 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3232 dpaa2_eth_free_channel(priv, channel);
3234 if (err == -EPROBE_DEFER) {
3235 for (i = 0; i < priv->num_channels; i++) {
3236 channel = priv->channel[i];
3237 nctx = &channel->nctx;
3238 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3239 dpaa2_eth_free_channel(priv, channel);
3241 priv->num_channels = 0;
3245 if (cpumask_empty(&priv->dpio_cpumask)) {
3246 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3250 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3251 cpumask_pr_args(&priv->dpio_cpumask));
3256 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3258 struct device *dev = priv->net_dev->dev.parent;
3259 struct dpaa2_eth_channel *ch;
3262 /* deregister CDAN notifications and free channels */
3263 for (i = 0; i < priv->num_channels; i++) {
3264 ch = priv->channel[i];
3265 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3266 dpaa2_eth_free_channel(priv, ch);
3270 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3273 struct device *dev = priv->net_dev->dev.parent;
3276 for (i = 0; i < priv->num_channels; i++)
3277 if (priv->channel[i]->nctx.desired_cpu == cpu)
3278 return priv->channel[i];
3280 /* We should never get here. Issue a warning and return
3281 * the first channel, because it's still better than nothing
3283 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3285 return priv->channel[0];
3288 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3290 struct device *dev = priv->net_dev->dev.parent;
3291 struct dpaa2_eth_fq *fq;
3292 int rx_cpu, txc_cpu;
3295 /* For each FQ, pick one channel/CPU to deliver frames to.
3296 * This may well change at runtime, either through irqbalance or
3297 * through direct user intervention.
3299 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3301 for (i = 0; i < priv->num_fqs; i++) {
3305 case DPAA2_RX_ERR_FQ:
3306 fq->target_cpu = rx_cpu;
3307 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3308 if (rx_cpu >= nr_cpu_ids)
3309 rx_cpu = cpumask_first(&priv->dpio_cpumask);
3311 case DPAA2_TX_CONF_FQ:
3312 fq->target_cpu = txc_cpu;
3313 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3314 if (txc_cpu >= nr_cpu_ids)
3315 txc_cpu = cpumask_first(&priv->dpio_cpumask);
3318 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3320 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
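	/* Worked example (mask assumed): with dpio_cpumask = {0,1,2,3},
	 * Rx FQs 0..7 land on CPUs 0,1,2,3,0,1,2,3, and Tx conf FQs
	 * round-robin the same way on their own counter.
	 */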
3326 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3330 /* We have one TxConf FQ per Tx flow.
3331 * The number of Tx and Rx queues is the same.
3332 * Tx queues come first in the fq array.
3334 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3335 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3336 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3337 priv->fq[priv->num_fqs++].flowid = (u16)i;
3340 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3341 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3342 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3343 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3344 priv->fq[priv->num_fqs].tc = (u8)j;
3345 priv->fq[priv->num_fqs++].flowid = (u16)i;
3349 /* We have exactly one Rx error queue per DPNI */
3350 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3351 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3353 /* For each FQ, decide on which core to process incoming frames */
3354 dpaa2_eth_set_fq_affinity(priv);
3357 /* Allocate and configure a buffer pool */
3358 struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
3360 struct device *dev = priv->net_dev->dev.parent;
3361 struct fsl_mc_device *dpbp_dev;
3362 struct dpbp_attr dpbp_attrs;
3363 struct dpaa2_eth_bp *bp;
3366 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3370 err = -EPROBE_DEFER;
3372 dev_err(dev, "DPBP device allocation failed\n");
3373 return ERR_PTR(err);
3376 bp = kzalloc(sizeof(*bp), GFP_KERNEL);
3382 err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
3383 &dpbp_dev->mc_handle);
3385 dev_err(dev, "dpbp_open() failed\n");
3389 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3391 dev_err(dev, "dpbp_reset() failed\n");
3395 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3397 dev_err(dev, "dpbp_enable() failed\n");
3401 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3404 dev_err(dev, "dpbp_get_attributes() failed\n");
3409 bp->bpid = dpbp_attrs.bpid;
3414 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3417 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3421 fsl_mc_object_free(dpbp_dev);
3423 return ERR_PTR(err);
3426 static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
3428 struct dpaa2_eth_bp *bp;
3431 bp = dpaa2_eth_allocate_dpbp(priv);
3435 priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
3438 for (i = 0; i < priv->num_channels; i++)
3439 priv->channel[i]->bp = bp;
3444 void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
3448 /* Find the index at which this BP is stored */
3449 for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
3450 if (priv->bp[idx_bp] == bp)
3453 /* Drain the pool and disable the associated MC object */
3454 dpaa2_eth_drain_pool(priv, bp->bpid);
3455 dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
3456 dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
3457 fsl_mc_object_free(bp->dev);
3460 /* Move the last in use DPBP over in this position */
3461 priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
3465 static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
3469 for (i = 0; i < priv->num_bps; i++)
3470 dpaa2_eth_free_dpbp(priv, priv->bp[i]);
3473 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3475 struct device *dev = priv->net_dev->dev.parent;
3476 struct dpni_buffer_layout buf_layout = {0};
3480 /* We need to check for WRIOP version 1.0.0, but depending on the MC
3481 * version, this number is not always provided correctly on rev1.
3482 * We need to check for both alternatives in this situation.
3484 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3485 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3486 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3488 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3490 /* We need to ensure that the buffer size seen by WRIOP is a multiple
3491 * of 64 or 256 bytes depending on the WRIOP version.
3493 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
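	/* Worked example: ALIGN_DOWN(2000, 64) = 1984, while
	 * ALIGN_DOWN(2048, 64) and ALIGN_DOWN(2048, 256) both leave 2048
	 * unchanged, so a buffer size that is already a multiple of the
	 * WRIOP alignment is not shrunk.
	 */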
3496 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3497 buf_layout.pass_timestamp = true;
3498 buf_layout.pass_frame_status = true;
3499 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3500 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3501 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3502 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3503 DPNI_QUEUE_TX, &buf_layout);
3505 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3509 /* tx-confirm buffer */
3510 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3511 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3512 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3513 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3515 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3519 /* Now that we've set our tx buffer layout, retrieve the minimum
3520 * required tx data offset.
3522 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3523 &priv->tx_data_offset);
3525 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3529 if ((priv->tx_data_offset % 64) != 0)
3530 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3531 priv->tx_data_offset);
3534 buf_layout.pass_frame_status = true;
3535 buf_layout.pass_parser_result = true;
3536 buf_layout.data_align = rx_buf_align;
3537 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3538 buf_layout.private_data_size = 0;
3539 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3540 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3541 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3542 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3543 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3544 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3545 DPNI_QUEUE_RX, &buf_layout);
3547 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3554 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3555 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
3557 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3558 struct dpaa2_eth_fq *fq,
3559 struct dpaa2_fd *fd, u8 prio,
3560 u32 num_frames __always_unused,
3561 int *frames_enqueued)
3565 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3566 priv->tx_qdid, prio,
3568 if (!err && frames_enqueued)
3569 *frames_enqueued = 1;
3573 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3574 struct dpaa2_eth_fq *fq,
3575 struct dpaa2_fd *fd,
3576 u8 prio, u32 num_frames,
3577 int *frames_enqueued)
3581 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3588 if (frames_enqueued)
3589 *frames_enqueued = err;
3593 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3595 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3596 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3597 priv->enqueue = dpaa2_eth_enqueue_qd;
3599 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3602 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3604 struct device *dev = priv->net_dev->dev.parent;
3605 struct dpni_link_cfg link_cfg = {0};
3608 /* Get the default link options so we don't override other flags */
3609 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3611 dev_err(dev, "dpni_get_link_cfg() failed\n");
3615 /* By default, enable both Rx and Tx pause frames */
3616 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3617 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3618 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3620 dev_err(dev, "dpni_set_link_cfg() failed\n");
3624 priv->link_state.options = link_cfg.options;
3629 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3631 struct dpni_queue_id qid = {0};
3632 struct dpaa2_eth_fq *fq;
3633 struct dpni_queue queue;
3636 /* We only use Tx FQIDs for FQID-based enqueue, so check
3637 * if DPNI version supports it before updating FQIDs
3639 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3640 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3643 for (i = 0; i < priv->num_fqs; i++) {
3645 if (fq->type != DPAA2_TX_CONF_FQ)
3647 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3648 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3649 DPNI_QUEUE_TX, j, fq->flowid,
3654 fq->tx_fqid[j] = qid.fqid;
3655 if (fq->tx_fqid[j] == 0)
3660 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3665 netdev_info(priv->net_dev,
3666 "Error reading Tx FQID, falling back to QDID-based enqueue\n");
3667 priv->enqueue = dpaa2_eth_enqueue_qd;
3670 /* Configure ingress classification based on VLAN PCP */
3671 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3673 struct device *dev = priv->net_dev->dev.parent;
3674 struct dpkg_profile_cfg kg_cfg = {0};
3675 struct dpni_qos_tbl_cfg qos_cfg = {0};
3676 struct dpni_rule_cfg key_params;
3677 void *dma_mem, *key, *mask;
3678 u8 key_size = 2; /* VLAN TCI field */
3681 /* VLAN-based classification only makes sense if we have multiple traffic classes.
3683 * Also, we need to extract just the 3-bit PCP field from the VLAN
3684 * header and we can only do that by using a mask
3686 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3687 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3691 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3695 kg_cfg.num_extracts = 1;
3696 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3697 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3698 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3699 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3701 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3703 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3708 qos_cfg.default_tc = 0;
3709 qos_cfg.discard_on_miss = 0;
3710 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3711 DPAA2_CLASSIFIER_DMA_SIZE,
3713 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3714 dev_err(dev, "QoS table DMA mapping failed\n");
3719 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3721 dev_err(dev, "dpni_set_qos_table failed\n");
3725 /* Add QoS table entries */
3726 key = kzalloc(key_size * 2, GFP_KERNEL);
3731 mask = key + key_size;
3732 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
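	/* Worked example: VLAN_PRIO_MASK is 0xe000, so only the 3 PCP bits
	 * of the TCI take part in the lookup. A frame with TCI 0xe064
	 * (PCP 7, VID 100) matches the pcp == 7 entry added below
	 * regardless of its VID.
	 */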
3734 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3736 if (dma_mapping_error(dev, key_params.key_iova)) {
3737 dev_err(dev, "Qos table entry DMA mapping failed\n");
3742 key_params.mask_iova = key_params.key_iova + key_size;
3743 key_params.key_size = key_size;
3745 /* We add rules for PCP-based distribution starting with highest
3746 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3747 * classes to accommodate all priority levels, the lowest ones end up
3748 * on TC 0 which was configured as default
3750 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3751 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3752 dma_sync_single_for_device(dev, key_params.key_iova,
3753 key_size * 2, DMA_TO_DEVICE);
3755 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3758 dev_err(dev, "dpni_add_qos_entry failed\n");
3759 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3764 priv->vlan_cls_enabled = true;
3766 /* Table and key memory is not persistent, clean everything up after
3767 * configuration is finished
3770 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3774 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3782 /* Configure the DPNI object this interface is associated with */
3783 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3785 struct device *dev = &ls_dev->dev;
3786 struct dpaa2_eth_priv *priv;
3787 struct net_device *net_dev;
3790 net_dev = dev_get_drvdata(dev);
3791 priv = netdev_priv(net_dev);
3793 /* get a handle for the DPNI object */
3794 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3796 dev_err(dev, "dpni_open() failed\n");
3800 /* Check if we can work with this DPNI object */
3801 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3802 &priv->dpni_ver_minor);
3804 dev_err(dev, "dpni_get_api_version() failed\n");
3807 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3808 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3809 priv->dpni_ver_major, priv->dpni_ver_minor,
3810 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3815 ls_dev->mc_io = priv->mc_io;
3816 ls_dev->mc_handle = priv->mc_token;
3818 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3820 dev_err(dev, "dpni_reset() failed\n");
3824 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3827 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3831 err = dpaa2_eth_set_buffer_layout(priv);
3835 dpaa2_eth_set_enqueue_mode(priv);
3837 /* Enable pause frame support */
3838 if (dpaa2_eth_has_pause_support(priv)) {
3839 err = dpaa2_eth_set_pause(priv);
3844 err = dpaa2_eth_set_vlan_qos(priv);
3845 if (err && err != -EOPNOTSUPP)
3848 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3849 sizeof(struct dpaa2_eth_cls_rule),
3851 if (!priv->cls_rules) {
3859 dpni_close(priv->mc_io, 0, priv->mc_token);
3864 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3868 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3870 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3873 dpni_close(priv->mc_io, 0, priv->mc_token);
3876 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3877 struct dpaa2_eth_fq *fq)
3879 struct device *dev = priv->net_dev->dev.parent;
3880 struct dpni_queue queue;
3881 struct dpni_queue_id qid;
3884 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3885 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3887 dev_err(dev, "dpni_get_queue(RX) failed\n");
3891 fq->fqid = qid.fqid;
3893 queue.destination.id = fq->channel->dpcon_id;
3894 queue.destination.type = DPNI_DEST_DPCON;
3895 queue.destination.priority = 1;
3896 queue.user_context = (u64)(uintptr_t)fq;
3897 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3898 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3899 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3902 dev_err(dev, "dpni_set_queue(RX) failed\n");
3907 /* only once for each channel */
3911 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3914 dev_err(dev, "xdp_rxq_info_reg failed\n");
3918 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3919 MEM_TYPE_PAGE_ORDER0, NULL);
3921 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3928 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3929 struct dpaa2_eth_fq *fq)
3931 struct device *dev = priv->net_dev->dev.parent;
3932 struct dpni_queue queue;
3933 struct dpni_queue_id qid;
3936 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3937 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3938 DPNI_QUEUE_TX, i, fq->flowid,
3941 dev_err(dev, "dpni_get_queue(TX) failed\n");
3944 fq->tx_fqid[i] = qid.fqid;
3947 /* All Tx queues belonging to the same flowid have the same qdbin */
3948 fq->tx_qdbin = qid.qdbin;
3950 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3951 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3954 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3958 fq->fqid = qid.fqid;
3960 queue.destination.id = fq->channel->dpcon_id;
3961 queue.destination.type = DPNI_DEST_DPCON;
3962 queue.destination.priority = 0;
3963 queue.user_context = (u64)(uintptr_t)fq;
3964 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3965 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3966 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3969 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3976 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3977 struct dpaa2_eth_fq *fq)
3979 struct device *dev = priv->net_dev->dev.parent;
3980 struct dpni_queue q = { { 0 } };
3981 struct dpni_queue_id qid;
3982 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3985 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3986 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3988 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3992 fq->fqid = qid.fqid;
3994 q.destination.id = fq->channel->dpcon_id;
3995 q.destination.type = DPNI_DEST_DPCON;
3996 q.destination.priority = 1;
3997 q.user_context = (u64)(uintptr_t)fq;
3998 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3999 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
4001 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
4008 /* Supported header fields for Rx hash distribution key */
4009 static const struct dpaa2_eth_dist_fields dist_fields[] = {
4012 .rxnfc_field = RXH_L2DA,
4013 .cls_prot = NET_PROT_ETH,
4014 .cls_field = NH_FLD_ETH_DA,
4015 .id = DPAA2_ETH_DIST_ETHDST,
4018 .cls_prot = NET_PROT_ETH,
4019 .cls_field = NH_FLD_ETH_SA,
4020 .id = DPAA2_ETH_DIST_ETHSRC,
4023 /* This is the last ethertype field parsed:
4024 * depending on frame format, it can be the MAC ethertype
4025 * or the VLAN etype.
4027 .cls_prot = NET_PROT_ETH,
4028 .cls_field = NH_FLD_ETH_TYPE,
4029 .id = DPAA2_ETH_DIST_ETHTYPE,
4033 .rxnfc_field = RXH_VLAN,
4034 .cls_prot = NET_PROT_VLAN,
4035 .cls_field = NH_FLD_VLAN_TCI,
4036 .id = DPAA2_ETH_DIST_VLAN,
4040 .rxnfc_field = RXH_IP_SRC,
4041 .cls_prot = NET_PROT_IP,
4042 .cls_field = NH_FLD_IP_SRC,
4043 .id = DPAA2_ETH_DIST_IPSRC,
4046 .rxnfc_field = RXH_IP_DST,
4047 .cls_prot = NET_PROT_IP,
4048 .cls_field = NH_FLD_IP_DST,
4049 .id = DPAA2_ETH_DIST_IPDST,
4052 .rxnfc_field = RXH_L3_PROTO,
4053 .cls_prot = NET_PROT_IP,
4054 .cls_field = NH_FLD_IP_PROTO,
4055 .id = DPAA2_ETH_DIST_IPPROTO,
4058 /* Using UDP ports, this is functionally equivalent to raw
4059 * byte pairs from L4 header.
4061 .rxnfc_field = RXH_L4_B_0_1,
4062 .cls_prot = NET_PROT_UDP,
4063 .cls_field = NH_FLD_UDP_PORT_SRC,
4064 .id = DPAA2_ETH_DIST_L4SRC,
4067 .rxnfc_field = RXH_L4_B_2_3,
4068 .cls_prot = NET_PROT_UDP,
4069 .cls_field = NH_FLD_UDP_PORT_DST,
4070 .id = DPAA2_ETH_DIST_L4DST,
4075 /* Configure the Rx hash key using the legacy API */
4076 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4078 struct device *dev = priv->net_dev->dev.parent;
4079 struct dpni_rx_tc_dist_cfg dist_cfg;
4082 memset(&dist_cfg, 0, sizeof(dist_cfg));
4084 dist_cfg.key_cfg_iova = key;
4085 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4086 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4088 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4089 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4092 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4100 /* Configure the Rx hash key using the new API */
4101 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4103 struct device *dev = priv->net_dev->dev.parent;
4104 struct dpni_rx_dist_cfg dist_cfg;
4107 memset(&dist_cfg, 0, sizeof(dist_cfg));
4109 dist_cfg.key_cfg_iova = key;
4110 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4111 dist_cfg.enable = 1;
4113 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4115 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4118 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4122 /* If the flow steering / hashing key is shared between all
4123 * traffic classes, install it just once
4125 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4132 /* Configure the Rx flow classification key */
4133 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4135 struct device *dev = priv->net_dev->dev.parent;
4136 struct dpni_rx_dist_cfg dist_cfg;
4139 memset(&dist_cfg, 0, sizeof(dist_cfg));
4141 dist_cfg.key_cfg_iova = key;
4142 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4143 dist_cfg.enable = 1;
4145 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4147 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4150 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4154 /* If the flow steering / hashing key is shared between all
4155 * traffic classes, install it just once
4157 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4164 /* Size of the Rx flow classification key */
4165 int dpaa2_eth_cls_key_size(u64 fields)
4169 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4170 if (!(fields & dist_fields[i].id))
4172 size += dist_fields[i].size;
4178 /* Offset of header field in Rx classification key */
4179 int dpaa2_eth_cls_fld_off(int prot, int field)
4183 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4184 if (dist_fields[i].cls_prot == prot &&
4185 dist_fields[i].cls_field == field)
4187 off += dist_fields[i].size;
4190 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4194 /* Prune unused fields from the classification rule.
4195 * Used when masking is not supported
4197 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4199 int off = 0, new_off = 0;
4202 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4203 size = dist_fields[i].size;
4204 if (dist_fields[i].id & fields) {
4205 memcpy(key_mem + new_off, key_mem + off, size);
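	/* Worked example: if @fields selects only IP_SRC and IP_DST, their
	 * bytes are copied down toward offset 0 and the slots the ETH/VLAN
	 * fields would have occupied in the full-sized key disappear from
	 * the rule.
	 */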
4212 /* Set Rx distribution (hash or flow classification) key
4213 * flags is a combination of RXH_ bits
4215 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4216 enum dpaa2_eth_rx_dist type, u64 flags)
4218 struct device *dev = net_dev->dev.parent;
4219 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4220 struct dpkg_profile_cfg cls_cfg;
4221 u32 rx_hash_fields = 0;
4222 dma_addr_t key_iova;
4227 memset(&cls_cfg, 0, sizeof(cls_cfg));
4229 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4230 struct dpkg_extract *key =
4231 &cls_cfg.extracts[cls_cfg.num_extracts];
4233 /* For both Rx hashing and classification keys
4234 * we set only the selected fields.
4236 if (!(flags & dist_fields[i].id))
4238 if (type == DPAA2_ETH_RX_DIST_HASH)
4239 rx_hash_fields |= dist_fields[i].rxnfc_field;
4241 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4242 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4246 key->type = DPKG_EXTRACT_FROM_HDR;
4247 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4248 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4249 key->extract.from_hdr.field = dist_fields[i].cls_field;
4250 cls_cfg.num_extracts++;
4253 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4257 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4259 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4263 /* Prepare for setting the rx dist */
4264 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4266 if (dma_mapping_error(dev, key_iova)) {
4267 dev_err(dev, "DMA mapping failed\n");
4272 if (type == DPAA2_ETH_RX_DIST_HASH) {
4273 if (dpaa2_eth_has_legacy_dist(priv))
4274 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4276 err = dpaa2_eth_config_hash_key(priv, key_iova);
4278 err = dpaa2_eth_config_cls_key(priv, key_iova);
4281 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4283 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4284 priv->rx_hash_fields = rx_hash_fields;
4291 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4293 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4297 if (!dpaa2_eth_hash_enabled(priv))
4300 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4301 if (dist_fields[i].rxnfc_field & flags)
4302 key |= dist_fields[i].id;
4304 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
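/* Illustrative sketch (not part of the driver): user space selecting the Rx
 * hash fields through ethtool's rxnfc interface, which is expected to reach
 * dpaa2_eth_set_hash() via the driver's ethtool ops. "eth0" and the
 * pre-opened socket are assumptions.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

static int set_udp4_hash_fields(int sock)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = UDP_V4_FLOW,
		/* hash on IP src/dst and UDP src/dst ports */
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}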
4307 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4309 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4312 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4314 struct device *dev = priv->net_dev->dev.parent;
4317 /* Check if we actually support Rx flow classification */
4318 if (dpaa2_eth_has_legacy_dist(priv)) {
4319 dev_dbg(dev, "Rx cls not supported by current MC version\n");
4323 if (!dpaa2_eth_fs_enabled(priv)) {
4324 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4328 if (!dpaa2_eth_hash_enabled(priv)) {
4329 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4333 /* If there is no support for masking in the classification table,
4334 * we don't set a default key, as it will depend on the rules
4335 * added by the user at runtime.
4337 if (!dpaa2_eth_fs_mask_enabled(priv))
4340 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4345 priv->rx_cls_enabled = 1;
4350 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4351 * frame queues and channels
4353 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4355 struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
4356 struct net_device *net_dev = priv->net_dev;
4357 struct dpni_pools_cfg pools_params = { 0 };
4358 struct device *dev = net_dev->dev.parent;
4359 struct dpni_error_cfg err_cfg;
4363 pools_params.num_dpbp = 1;
4364 pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4365 pools_params.pools[0].backup_pool = 0;
4366 pools_params.pools[0].buffer_size = priv->rx_buf_size;
4367 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4369 dev_err(dev, "dpni_set_pools() failed\n");
4373 /* have the interface implicitly distribute traffic based on
4374 * the default hash key
4376 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4377 if (err && err != -EOPNOTSUPP)
4378 dev_err(dev, "Failed to configure hashing\n");
4380 /* Configure the flow classification key; it includes all
4381 * supported header fields and cannot be modified at runtime
4383 err = dpaa2_eth_set_default_cls(priv);
4384 if (err && err != -EOPNOTSUPP)
4385 dev_err(dev, "Failed to configure Rx classification key\n");
4387 /* Configure handling of error frames */
4388 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4389 err_cfg.set_frame_annotation = 1;
4390 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4391 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4394 dev_err(dev, "dpni_set_errors_behavior failed\n");
4398 /* Configure Rx and Tx conf queues to generate CDANs */
4399 for (i = 0; i < priv->num_fqs; i++) {
4400 switch (priv->fq[i].type) {
4402 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4404 case DPAA2_TX_CONF_FQ:
4405 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4407 case DPAA2_RX_ERR_FQ:
4408 err = setup_rx_err_flow(priv, &priv->fq[i]);
4411 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4418 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4419 DPNI_QUEUE_TX, &priv->tx_qdid);
4421 dev_err(dev, "dpni_get_qdid() failed\n");
4428 /* Allocate rings for storing incoming frame descriptors */
4429 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4431 struct net_device *net_dev = priv->net_dev;
4432 struct device *dev = net_dev->dev.parent;
4435 for (i = 0; i < priv->num_channels; i++) {
4436 priv->channel[i]->store =
4437 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4438 if (!priv->channel[i]->store) {
4439 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4447 for (i = 0; i < priv->num_channels; i++) {
4448 if (!priv->channel[i]->store)
4450 dpaa2_io_store_destroy(priv->channel[i]->store);
4456 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4460 for (i = 0; i < priv->num_channels; i++)
4461 dpaa2_io_store_destroy(priv->channel[i]->store);
4464 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4466 struct net_device *net_dev = priv->net_dev;
4467 struct device *dev = net_dev->dev.parent;
4468 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4471 /* Get firmware address, if any */
4472 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4474 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4478 /* Get DPNI attributes address, if any */
4479 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4482 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4486 /* First check if firmware has any address configured by bootloader */
4487 if (!is_zero_ether_addr(mac_addr)) {
4488 /* If the DPMAC addr != DPNI addr, update it */
4489 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4490 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4494 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4498 eth_hw_addr_set(net_dev, mac_addr);
4499 } else if (is_zero_ether_addr(dpni_mac_addr)) {
4500 /* No MAC address configured, fill in net_dev->dev_addr with a random one */
4503 eth_hw_addr_random(net_dev);
4504 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4506 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4509 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4513 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4514 * practical purposes, this will be our "permanent" mac address,
4515 * at least until the next reboot. This move will also permit
4516 * register_netdevice() to properly fill up net_dev->perm_addr.
4518 net_dev->addr_assign_type = NET_ADDR_PERM;
4520 /* NET_ADDR_PERM is the default; all we have to do is
4521 * fill in the device addr. */
4523 eth_hw_addr_set(net_dev, dpni_mac_addr);
4529 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4531 struct device *dev = net_dev->dev.parent;
4532 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4533 u32 options = priv->dpni_attrs.options;
4534 u64 supported = 0, not_supported = 0;
4535 u8 bcast_addr[ETH_ALEN];
4539 net_dev->netdev_ops = &dpaa2_eth_ops;
4540 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4542 err = dpaa2_eth_set_mac_addr(priv);
4546 /* Explicitly add the broadcast address to the MAC filtering table */
4547 eth_broadcast_addr(bcast_addr);
4548 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4550 dev_err(dev, "dpni_add_mac_addr() failed\n");
4554 /* Set MTU upper limit; lower limit is 68B (default value) */
4555 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4556 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4559 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4563 /* Set actual number of queues in the net device */
4564 num_queues = dpaa2_eth_queue_count(priv);
4565 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4567 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4570 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4572 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4576 dpaa2_eth_detect_features(priv);
4578 /* Capabilities listing */
4579 supported |= IFF_LIVE_ADDR_CHANGE;
4581 if (options & DPNI_OPT_NO_MAC_FILTER)
4582 not_supported |= IFF_UNICAST_FLT;
4584 supported |= IFF_UNICAST_FLT;
4586 net_dev->priv_flags |= supported;
4587 net_dev->priv_flags &= ~not_supported;
4590 net_dev->features = NETIF_F_RXCSUM |
4591 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4592 NETIF_F_SG | NETIF_F_HIGHDMA |
4593 NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4594 net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4595 net_dev->hw_features = net_dev->features;
4597 if (priv->dpni_attrs.vlan_filter_entries)
4598 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4603 static int dpaa2_eth_poll_link_state(void *arg)
4605 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4608 while (!kthread_should_stop()) {
4609 err = dpaa2_eth_link_state_update(priv);
4613 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
		netdev_dbg(priv->net_dev, "waiting for mac\n");
		return PTR_ERR(dpmac_dev);
	}

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;

	if (dpaa2_mac_is_type_phy(mac)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			if (err == -EPROBE_DEFER)
				netdev_dbg(priv->net_dev,
					   "could not connect to MAC\n");
			else
				netdev_err(priv->net_dev,
					   "Error connecting to the MAC endpoint: %pe\n",
					   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	mutex_lock(&priv->mac_lock);
	priv->mac = mac;
	mutex_unlock(&priv->mac_lock);

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
err_free_mac:
	kfree(mac);
	return err;
}
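
/* Reverse of dpaa2_eth_connect_mac(): unpublish priv->mac under
 * mac_lock first, so no reader can see a MAC that is being torn down,
 * then disconnect and close it.
 */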
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_mac *mac;

	mutex_lock(&priv->mac_lock);
	mac = priv->mac;
	priv->mac = NULL;
	mutex_unlock(&priv->mac_lock);

	if (!mac)
		return;

	if (dpaa2_mac_is_type_phy(mac))
		dpaa2_mac_disconnect(mac);

	dpaa2_mac_close(mac);
	kfree(mac);
}
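
/* Threaded IRQ handler for DPNI events: reacts to link state changes
 * and to the endpoint being connected to or disconnected from a DPMAC.
 */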
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	bool had_mac;
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		/* We can avoid locking because the "endpoint changed" IRQ
		 * handler is the only one who changes priv->mac at runtime,
		 * so we are not racing with anyone.
		 */
		had_mac = !!priv->mac;
		if (had_mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
	}

	return IRQ_HANDLED;
}
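
/* Allocate the MC interrupt, install the threaded handler above and
 * unmask the link-changed and endpoint-changed events.
 */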
static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
	}
}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}
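
/* Probe: allocate the net_device, obtain an MC portal, set up the DPNI
 * and its DPIO/DPBP dependencies, then wire up NAPI, checksum offload,
 * IRQs (or link polling as a fallback), devlink and finally register
 * the netdev. Failures unwind in reverse order through the err_* labels.
 */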
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);

	mutex_init(&priv->mac_lock);

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
	priv->rx_tstamp = false;

	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
	mutex_init(&priv->onestep_tstamp_lock);
	skb_queue_head_init(&priv->tx_skbs);

	priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO) {
			dev_dbg(dev, "waiting for MC portal\n");
			err = -EPROBE_DEFER;
		} else {
			dev_err(dev, "MC portal allocation failed\n");
		}
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_default_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	priv->fd = alloc_percpu(*priv->fd);
	if (!priv->fd) {
		dev_err(dev, "alloc_percpu(fds) failed\n");
		err = -ENOMEM;
		goto err_alloc_fds;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_dl_alloc(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dpaa2_eth_dl_register(priv);
	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_dl_port_del(priv);
err_dl_port_add:
	dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
	dpaa2_eth_dl_free(priv);
err_dl_register:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->fd);
err_alloc_fds:
	free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	dpaa2_eth_del_ch_napi(priv);
err_bind:
	dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
	dpaa2_eth_free_dpio(priv);
err_dpio_setup:
	dpaa2_eth_free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	destroy_workqueue(priv->dpaa2_ptp_wq);
err_wq_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}
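
/* Remove: unregister the netdev first so no new traffic can start,
 * then undo the probe-time setup in reverse order.
 */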
static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	dpaa2_eth_dl_unregister(priv);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif

	unregister_netdev(net_dev);

	dpaa2_eth_dl_port_del(priv);
	dpaa2_eth_dl_traps_unregister(priv);
	dpaa2_eth_dl_free(priv);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	dpaa2_eth_disconnect_mac(priv);
	dpaa2_eth_free_rings(priv);
	free_percpu(priv->fd);
	free_percpu(priv->sgt_cache);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	dpaa2_eth_del_ch_napi(priv);
	dpaa2_eth_free_dpbps(priv);
	dpaa2_eth_free_dpio(priv);
	dpaa2_eth_free_dpni(priv);
	if (priv->onestep_reg_base)
		iounmap(priv->onestep_reg_base);

	fsl_mc_portal_free(priv->mc_io);

	destroy_workqueue(priv->dpaa2_ptp_wq);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};
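
/* Module init: bring up debugfs support, then register with the fsl-mc
 * bus; debugfs is torn down again if registration fails.
 */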
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);