1 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 #include <linux/init.h>
34 #include <linux/module.h>
35 #include <linux/of_platform.h>
36 #include <linux/of_mdio.h>
37 #include <linux/of_net.h>
39 #include <linux/if_arp.h>
40 #include <linux/if_vlan.h>
41 #include <linux/icmp.h>
43 #include <linux/ipv6.h>
44 #include <linux/udp.h>
45 #include <linux/tcp.h>
46 #include <linux/net.h>
47 #include <linux/skbuff.h>
48 #include <linux/etherdevice.h>
49 #include <linux/if_ether.h>
50 #include <linux/highmem.h>
51 #include <linux/percpu.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/sort.h>
54 #include <linux/phy_fixed.h>
55 #include <soc/fsl/bman.h>
56 #include <soc/fsl/qman.h>
58 #include "fman_port.h"
62 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
63 * using trace events only need to #include <trace/events/sched.h>
65 #define CREATE_TRACE_POINTS
66 #include "dpaa_eth_trace.h"
68 static int debug = -1;
69 module_param(debug, int, 0444);
70 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
72 static u16 tx_timeout = 1000;
73 module_param(tx_timeout, ushort, 0444);
74 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
76 #define FM_FD_STAT_RX_ERRORS \
77 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
78 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
79 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
80 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
81 FM_FD_ERR_PRS_HDR_ERR)
83 #define FM_FD_STAT_TX_ERRORS \
84 (FM_FD_ERR_UNSUPPORTED_FORMAT | \
85 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
87 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
88 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
91 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
92 /* Ingress congestion threshold on FMan ports
93 * The size in bytes of the ingress tail-drop threshold on FMan ports.
94 * Traffic piling up above this value will be rejected by QMan and discarded
98 /* Size in bytes of the FQ taildrop threshold */
99 #define DPAA_FQ_TD 0x200000
101 #define DPAA_CS_THRESHOLD_1G 0x06000000
102 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
103 * The size in bytes of the egress Congestion State notification threshold on
104 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
105 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
106 * and the larger the frame size, the more acute the problem.
107 * So we have to find a balance between these factors:
108 * - avoiding the device staying congested for a prolonged time (risking
109 * the netdev watchdog to fire - see also the tx_timeout module param);
110 * - affecting performance of protocols such as TCP, which otherwise
111 * behave well under the congestion notification mechanism;
112 * - preventing the Tx cores from tightly-looping (as if the congestion
113 * threshold was too low to be effective);
114 * - running out of memory if the CS threshold is set too high.
117 #define DPAA_CS_THRESHOLD_10G 0x10000000
118 /* The size in bytes of the egress Congestion State notification threshold on
119 * 10G ports, range 0x1000 .. 0x10000000
122 /* Largest value that the FQD's OAL field can hold */
123 #define FSL_QMAN_MAX_OAL 127
125 /* Default alignment for start of data in an Rx FD */
126 #define DPAA_FD_DATA_ALIGNMENT 16
128 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
129 #define DPAA_SGT_SIZE 256
131 /* Values for the L3R field of the FM Parse Results
133 /* L3 Type field: First IP Present IPv4 */
134 #define FM_L3_PARSE_RESULT_IPV4 0x8000
135 /* L3 Type field: First IP Present IPv6 */
136 #define FM_L3_PARSE_RESULT_IPV6 0x4000
137 /* Values for the L4R field of the FM Parse Results */
138 /* L4 Type field: UDP */
139 #define FM_L4_PARSE_RESULT_UDP 0x40
140 /* L4 Type field: TCP */
141 #define FM_L4_PARSE_RESULT_TCP 0x20
143 /* FD status field indicating whether the FM Parser has attempted to validate
144 * the L4 csum of the frame.
145 * Note that having this bit set doesn't necessarily imply that the checksum
146 * is valid. One would have to check the parse results to find that out.
148 #define FM_FD_STAT_L4CV 0x00000004
150 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
151 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
153 #define FSL_DPAA_BPID_INV 0xff
154 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128
155 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80
157 #define DPAA_TX_PRIV_DATA_SIZE 16
158 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
159 #define DPAA_TIME_STAMP_SIZE 8
160 #define DPAA_HASH_RESULTS_SIZE 8
161 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
162 dpaa_rx_extra_headroom)
164 #define DPAA_ETH_PCD_RXQ_NUM 128
166 #define DPAA_ENQUEUE_RETRIES 100000
168 enum port_type {RX, TX};
171 struct dpaa_fq *tx_defq;
172 struct dpaa_fq *tx_errq;
173 struct dpaa_fq *rx_defq;
174 struct dpaa_fq *rx_errq;
175 struct dpaa_fq *rx_pcdq;
178 /* All the dpa bps in use at any moment */
179 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
181 /* The raw buffer size must be cacheline aligned */
182 #define DPAA_BP_RAW_SIZE 4096
183 /* When using more than one buffer pool, the raw sizes are as follows:
186 * 3 bp: 1KB, 2KB, 4KB
187 * 4 bp: 1KB, 2KB, 4KB, 8KB
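 * e.g. for cnt = 3, indices 0/1/2 yield 1KB/2KB/4KB: the smallest raw size
 * is DPAA_BP_RAW_SIZE / 4 and each higher index doubles it, per the table
 * above.
 */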
189 static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
191 size_t res = DPAA_BP_RAW_SIZE / 4;
194 for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
199 /* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
200 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
201 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
202 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
205 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
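/* e.g. assuming the common DPAA_BP_RAW_SIZE of 4KB and a 64-byte cache line,
 * dpaa_bp_size() comes to 4032 bytes minus the aligned skb_shared_info,
 * i.e. the usable Rx data size (dpaa_bp->size) used when building skbs
 * on the Rx path.
 */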
207 static int dpaa_max_frm;
209 static int dpaa_rx_extra_headroom;
211 #define dpaa_get_max_mtu() \
212 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
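/* e.g. assuming the typical FMan maximum frame length of 1522 bytes:
 * dpaa_get_max_mtu() = 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN)
 *                    = 1522 - (18 + 4) = 1500
 */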
214 static int dpaa_netdev_init(struct net_device *net_dev,
215 const struct net_device_ops *dpaa_ops,
218 struct dpaa_priv *priv = netdev_priv(net_dev);
219 struct device *dev = net_dev->dev.parent;
220 struct dpaa_percpu_priv *percpu_priv;
224 /* Although we access another CPU's private data here
225 * we do it at initialization so it is safe
227 for_each_possible_cpu(i) {
228 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
229 percpu_priv->net_dev = net_dev;
232 net_dev->netdev_ops = dpaa_ops;
233 mac_addr = priv->mac_dev->addr;
235 net_dev->mem_start = priv->mac_dev->res->start;
236 net_dev->mem_end = priv->mac_dev->res->end;
238 net_dev->min_mtu = ETH_MIN_MTU;
239 net_dev->max_mtu = dpaa_get_max_mtu();
241 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
242 NETIF_F_LLTX | NETIF_F_RXHASH);
244 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
245 /* The kernel enables GSO automatically if we declare NETIF_F_SG.
246 * For conformity, we'll still declare GSO explicitly.
248 net_dev->features |= NETIF_F_GSO;
249 net_dev->features |= NETIF_F_RXCSUM;
251 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
252 /* we do not want shared skbs on TX */
253 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
255 net_dev->features |= net_dev->hw_features;
256 net_dev->vlan_features = net_dev->features;
258 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
259 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
261 net_dev->ethtool_ops = &dpaa_ethtool_ops;
263 net_dev->needed_headroom = priv->tx_headroom;
264 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
266 /* start without the RUNNING flag, phylib controls it later */
267 netif_carrier_off(net_dev);
269 err = register_netdev(net_dev);
271 dev_err(dev, "register_netdev() = %d\n", err);
278 static int dpaa_stop(struct net_device *net_dev)
280 struct mac_device *mac_dev;
281 struct dpaa_priv *priv;
284 priv = netdev_priv(net_dev);
285 mac_dev = priv->mac_dev;
287 netif_tx_stop_all_queues(net_dev);
288 /* Allow the Fman (Tx) port to process in-flight frames before we
289 * try switching it off.
291 usleep_range(5000, 10000);
293 err = mac_dev->stop(mac_dev);
295 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
298 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
299 error = fman_port_disable(mac_dev->port[i]);
305 phy_disconnect(net_dev->phydev);
306 net_dev->phydev = NULL;
311 static void dpaa_tx_timeout(struct net_device *net_dev)
313 struct dpaa_percpu_priv *percpu_priv;
314 const struct dpaa_priv *priv;
316 priv = netdev_priv(net_dev);
317 percpu_priv = this_cpu_ptr(priv->percpu_priv);
319 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
320 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
322 percpu_priv->stats.tx_errors++;
325 /* Calculates the statistics for the given device by adding the statistics
326 * collected by each CPU.
328 static void dpaa_get_stats64(struct net_device *net_dev,
329 struct rtnl_link_stats64 *s)
331 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
332 struct dpaa_priv *priv = netdev_priv(net_dev);
333 struct dpaa_percpu_priv *percpu_priv;
334 u64 *netstats = (u64 *)s;
338 for_each_possible_cpu(i) {
339 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
341 cpustats = (u64 *)&percpu_priv->stats;
343 /* add stats from all CPUs */
344 for (j = 0; j < numstats; j++)
345 netstats[j] += cpustats[j];
349 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
352 struct dpaa_priv *priv = netdev_priv(net_dev);
353 struct tc_mqprio_qopt *mqprio = type_data;
357 if (type != TC_SETUP_QDISC_MQPRIO)
360 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
361 num_tc = mqprio->num_tc;
363 if (num_tc == priv->num_tc)
367 netdev_reset_tc(net_dev);
371 if (num_tc > DPAA_TC_NUM) {
372 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
377 netdev_set_num_tc(net_dev, num_tc);
379 for (i = 0; i < num_tc; i++)
380 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
381 i * DPAA_TC_TXQ_NUM);
384 priv->num_tc = num_tc ? : 1;
385 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
389 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
391 struct dpaa_eth_data *eth_data;
392 struct device *dpaa_dev;
393 struct mac_device *mac_dev;
395 dpaa_dev = &pdev->dev;
396 eth_data = dpaa_dev->platform_data;
398 dev_err(dpaa_dev, "eth_data missing\n");
399 return ERR_PTR(-ENODEV);
401 mac_dev = eth_data->mac_dev;
403 dev_err(dpaa_dev, "mac_dev missing\n");
404 return ERR_PTR(-EINVAL);
410 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
412 const struct dpaa_priv *priv;
413 struct mac_device *mac_dev;
414 struct sockaddr old_addr;
417 priv = netdev_priv(net_dev);
419 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
421 err = eth_mac_addr(net_dev, addr);
423 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
427 mac_dev = priv->mac_dev;
429 err = mac_dev->change_addr(mac_dev->fman_mac,
430 (enet_addr_t *)net_dev->dev_addr);
432 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
434 /* reverting to previous address */
435 eth_mac_addr(net_dev, &old_addr);
443 static void dpaa_set_rx_mode(struct net_device *net_dev)
445 const struct dpaa_priv *priv;
448 priv = netdev_priv(net_dev);
450 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
451 priv->mac_dev->promisc = !priv->mac_dev->promisc;
452 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
453 priv->mac_dev->promisc);
455 netif_err(priv, drv, net_dev,
456 "mac_dev->set_promisc() = %d\n",
460 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
461 priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
462 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
463 priv->mac_dev->allmulti);
465 netif_err(priv, drv, net_dev,
466 "mac_dev->set_allmulti() = %d\n",
470 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
472 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
476 static struct dpaa_bp *dpaa_bpid2pool(int bpid)
478 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
481 return dpaa_bp_array[bpid];
484 /* checks if this bpool is already allocated */
485 static bool dpaa_bpid2pool_use(int bpid)
487 if (dpaa_bpid2pool(bpid)) {
488 atomic_inc(&dpaa_bp_array[bpid]->refs);
495 /* called only once per bpid by dpaa_bp_alloc_pool() */
496 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
498 dpaa_bp_array[bpid] = dpaa_bp;
499 atomic_set(&dpaa_bp->refs, 1);
502 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
506 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
507 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
512 /* If the pool is already specified, we only create one per bpid */
513 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
514 dpaa_bpid2pool_use(dpaa_bp->bpid))
517 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
518 dpaa_bp->pool = bman_new_pool();
519 if (!dpaa_bp->pool) {
520 pr_err("%s: bman_new_pool() failed\n",
525 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
528 if (dpaa_bp->seed_cb) {
529 err = dpaa_bp->seed_cb(dpaa_bp);
531 goto pool_seed_failed;
534 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
539 pr_err("%s: pool seeding failed\n", __func__);
540 bman_free_pool(dpaa_bp->pool);
545 /* remove and free all the buffers from the given buffer pool */
546 static void dpaa_bp_drain(struct dpaa_bp *bp)
552 struct bm_buffer bmb[8];
555 ret = bman_acquire(bp->pool, bmb, num);
558 /* we have less than 8 buffers left;
559 * drain them one by one
565 /* Pool is fully drained */
571 for (i = 0; i < num; i++)
572 bp->free_buf_cb(bp, &bmb[i]);
576 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
578 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
580 /* the mapping between bpid and dpaa_bp is done very late in the
581 * allocation procedure; if something failed before the mapping, the bp
582 * was not configured, therefore we don't need the below instructions
587 if (!atomic_dec_and_test(&bp->refs))
593 dpaa_bp_array[bp->bpid] = NULL;
594 bman_free_pool(bp->pool);
597 static void dpaa_bps_free(struct dpaa_priv *priv)
601 for (i = 0; i < DPAA_BPS_NUM; i++)
602 dpaa_bp_free(priv->dpaa_bps[i]);
605 /* Use multiple WQs for FQ assignment:
606 * - Tx Confirmation queues go to WQ1.
607 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
608 * to be scheduled, in case there are many more FQs in WQ6).
609 * - Rx Default goes to WQ6.
610 * - Tx queues go to different WQs depending on their priority. Equal
611 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
612 * WQ0 (highest priority).
613 * This ensures that Tx-confirmed buffers are released in a timely manner. In particular,
614 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
615 * are greatly outnumbered by other FQs in the system, while
616 * dequeue scheduling is round-robin.
618 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
620 switch (fq->fq_type) {
621 case FQ_TYPE_TX_CONFIRM:
622 case FQ_TYPE_TX_CONF_MQ:
625 case FQ_TYPE_RX_ERROR:
626 case FQ_TYPE_TX_ERROR:
629 case FQ_TYPE_RX_DEFAULT:
634 switch (idx / DPAA_TC_TXQ_NUM) {
636 /* Low priority (best effort) */
640 /* Medium priority */
648 /* Very high priority */
652 WARN(1, "Too many TX FQs: more than %d!\n",
657 WARN(1, "Invalid FQ type %d for FQID %d!\n",
658 fq->fq_type, fq->fqid);
662 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
663 u32 start, u32 count,
664 struct list_head *list,
665 enum dpaa_fq_type fq_type)
667 struct dpaa_fq *dpaa_fq;
670 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
675 for (i = 0; i < count; i++) {
676 dpaa_fq[i].fq_type = fq_type;
677 dpaa_fq[i].fqid = start ? start + i : 0;
678 list_add_tail(&dpaa_fq[i].list, list);
681 for (i = 0; i < count; i++)
682 dpaa_assign_wq(dpaa_fq + i, i);
687 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
688 struct fm_port_fqs *port_fqs)
690 struct dpaa_fq *dpaa_fq;
691 u32 fq_base, fq_base_aligned, i;
693 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
695 goto fq_alloc_failed;
697 port_fqs->rx_errq = &dpaa_fq[0];
699 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
701 goto fq_alloc_failed;
703 port_fqs->rx_defq = &dpaa_fq[0];
705 /* the PCD FQIDs range needs to be aligned for correct operation */
706 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
707 goto fq_alloc_failed;
709 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
711 for (i = fq_base; i < fq_base_aligned; i++)
712 qman_release_fqid(i);
714 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
715 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
716 qman_release_fqid(i);
718 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
719 list, FQ_TYPE_RX_PCD);
721 goto fq_alloc_failed;
723 port_fqs->rx_pcdq = &dpaa_fq[0];
725 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
726 goto fq_alloc_failed;
728 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
730 goto fq_alloc_failed;
732 port_fqs->tx_errq = &dpaa_fq[0];
734 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
736 goto fq_alloc_failed;
738 port_fqs->tx_defq = &dpaa_fq[0];
740 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
741 goto fq_alloc_failed;
746 dev_err(dev, "dpaa_fq_alloc() failed\n");
750 static u32 rx_pool_channel;
751 static DEFINE_SPINLOCK(rx_pool_channel_init);
753 static int dpaa_get_channel(void)
755 spin_lock(&rx_pool_channel_init);
756 if (!rx_pool_channel) {
760 ret = qman_alloc_pool(&pool);
763 rx_pool_channel = pool;
765 spin_unlock(&rx_pool_channel_init);
766 if (!rx_pool_channel)
768 return rx_pool_channel;
771 static void dpaa_release_channel(void)
773 qman_release_pool(rx_pool_channel);
776 static void dpaa_eth_add_channel(u16 channel)
778 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
779 const cpumask_t *cpus = qman_affine_cpus();
780 struct qman_portal *portal;
783 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
784 portal = qman_get_affine_portal(cpu);
785 qman_p_static_dequeue_add(portal, pool);
789 /* Congestion group state change notification callback.
790 * Stops the device's egress queues while they are congested and
791 * wakes them upon exiting congested state.
792 * Also updates some CGR-related stats.
794 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
797 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
798 struct dpaa_priv, cgr_data.cgr);
801 priv->cgr_data.congestion_start_jiffies = jiffies;
802 netif_tx_stop_all_queues(priv->net_dev);
803 priv->cgr_data.cgr_congested_count++;
805 priv->cgr_data.congested_jiffies +=
806 (jiffies - priv->cgr_data.congestion_start_jiffies);
807 netif_tx_wake_all_queues(priv->net_dev);
811 static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
813 struct qm_mcc_initcgr initcgr;
817 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
819 if (netif_msg_drv(priv))
820 pr_err("%s: Error %d allocating CGR ID\n",
824 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
826 /* Enable Congestion State Change Notifications and CS taildrop */
827 memset(&initcgr, 0, sizeof(initcgr));
828 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
829 initcgr.cgr.cscn_en = QM_CGR_EN;
831 /* Set different thresholds based on the MAC speed.
832 * This may turn suboptimal if the MAC is reconfigured at a speed
833 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
834 * In such cases, we ought to reconfigure the threshold, too.
836 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
837 cs_th = DPAA_CS_THRESHOLD_10G;
839 cs_th = DPAA_CS_THRESHOLD_1G;
840 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
842 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
843 initcgr.cgr.cstd_en = QM_CGR_EN;
845 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
848 if (netif_msg_drv(priv))
849 pr_err("%s: Error %d creating CGR with ID %d\n",
850 __func__, err, priv->cgr_data.cgr.cgrid);
851 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
854 if (netif_msg_drv(priv))
855 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
856 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
857 priv->cgr_data.cgr.chan);
863 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
865 const struct qman_fq *template)
867 fq->fq_base = *template;
868 fq->net_dev = priv->net_dev;
870 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
871 fq->channel = priv->channel;
874 static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
876 struct fman_port *port,
877 const struct qman_fq *template)
879 fq->fq_base = *template;
880 fq->net_dev = priv->net_dev;
883 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
884 fq->channel = (u16)fman_port_get_qman_channel_id(port);
886 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
890 static void dpaa_fq_setup(struct dpaa_priv *priv,
891 const struct dpaa_fq_cbs *fq_cbs,
892 struct fman_port *tx_port)
894 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
895 const cpumask_t *affine_cpus = qman_affine_cpus();
896 u16 channels[NR_CPUS];
899 for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
900 channels[num_portals++] = qman_affine_channel(cpu);
902 if (num_portals == 0)
903 dev_err(priv->net_dev->dev.parent,
904 "No Qman software (affine) channels found");
906 /* Initialize each FQ in the list */
907 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
908 switch (fq->fq_type) {
909 case FQ_TYPE_RX_DEFAULT:
910 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
912 case FQ_TYPE_RX_ERROR:
913 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
918 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
919 fq->channel = channels[portal_cnt++ % num_portals];
922 dpaa_setup_egress(priv, fq, tx_port,
923 &fq_cbs->egress_ern);
924 /* If we have more Tx queues than the number of cores,
925 * just ignore the extra ones.
927 if (egress_cnt < DPAA_ETH_TXQ_NUM)
928 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
930 case FQ_TYPE_TX_CONF_MQ:
931 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
933 case FQ_TYPE_TX_CONFIRM:
934 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
936 case FQ_TYPE_TX_ERROR:
937 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
940 dev_warn(priv->net_dev->dev.parent,
941 "Unknown FQ type detected!\n");
946 /* Make sure all CPUs receive a corresponding Tx queue. */
947 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
948 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
949 if (fq->fq_type != FQ_TYPE_TX)
951 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
952 if (egress_cnt == DPAA_ETH_TXQ_NUM)
958 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
959 struct qman_fq *tx_fq)
963 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
964 if (priv->egress_fqs[i] == tx_fq)
970 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
972 const struct dpaa_priv *priv;
973 struct qman_fq *confq = NULL;
974 struct qm_mcc_initfq initfq;
980 priv = netdev_priv(dpaa_fq->net_dev);
981 dev = dpaa_fq->net_dev->dev.parent;
983 if (dpaa_fq->fqid == 0)
984 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
986 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
988 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
990 dev_err(dev, "qman_create_fq() failed\n");
993 fq = &dpaa_fq->fq_base;
996 memset(&initfq, 0, sizeof(initfq));
998 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
999 /* Note: we may get to keep an empty FQ in cache */
1000 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1002 /* Try to reduce the number of portal interrupts for
1003 * Tx Confirmation FQs.
1005 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1006 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1009 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1011 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1013 /* Put all egress queues in a congestion group of their own.
1014 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1015 * rather than Tx - but they nonetheless account for the
1016 * memory footprint on behalf of egress traffic. We therefore
1017 * place them in the netdev's CGR, along with the Tx FQs.
1019 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1020 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1021 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1022 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1023 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1024 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1025 /* Set a fixed overhead accounting, in an attempt to
1026 * reduce the impact of fixed-size skb shells and the
1027 * driver's needed headroom on system memory. This is
1028 * especially the case when the egress traffic is
1029 * composed of small datagrams.
1030 * Unfortunately, QMan's OAL value is capped to an
1031 * insufficient value, but even that is better than
1032 * no overhead accounting at all.
1034 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1035 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1036 qm_fqd_set_oal(&initfq.fqd,
1037 min(sizeof(struct sk_buff) +
1039 (size_t)FSL_QMAN_MAX_OAL));
1043 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1044 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1045 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1048 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1049 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1051 confq = priv->conf_fqs[queue_id];
1054 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1055 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1056 * A2V=1 (contextA A2 field is valid)
1057 * A0V=1 (contextA A0 field is valid)
1058 * B0V=1 (contextB field is valid)
1059 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1060 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1062 qm_fqd_context_a_set64(&initfq.fqd,
1063 0x1e00000080000000ULL);
1067 /* Put all the ingress queues in our "ingress CGR". */
1068 if (priv->use_ingress_cgr &&
1069 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1070 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1071 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1072 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1073 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1074 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1075 /* Set a fixed overhead accounting, just like for the
1078 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1079 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1080 qm_fqd_set_oal(&initfq.fqd,
1081 min(sizeof(struct sk_buff) +
1083 (size_t)FSL_QMAN_MAX_OAL));
1086 /* Initialization common to all ingress queues */
1087 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1088 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1089 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1090 QM_FQCTRL_CTXASTASHING);
1091 initfq.fqd.context_a.stashing.exclusive =
1092 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1093 QM_STASHING_EXCL_ANNOTATION;
1094 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1095 DIV_ROUND_UP(sizeof(struct qman_fq),
1099 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1101 dev_err(dev, "qman_init_fq(%u) = %d\n",
1102 qman_fq_fqid(fq), err);
1103 qman_destroy_fq(fq);
1108 dpaa_fq->fqid = qman_fq_fqid(fq);
1113 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1115 const struct dpaa_priv *priv;
1116 struct dpaa_fq *dpaa_fq;
1121 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1122 priv = netdev_priv(dpaa_fq->net_dev);
1124 if (dpaa_fq->init) {
1125 err = qman_retire_fq(fq, NULL);
1126 if (err < 0 && netif_msg_drv(priv))
1127 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1128 qman_fq_fqid(fq), err);
1130 error = qman_oos_fq(fq);
1131 if (error < 0 && netif_msg_drv(priv)) {
1132 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1133 qman_fq_fqid(fq), error);
1139 qman_destroy_fq(fq);
1140 list_del(&dpaa_fq->list);
1145 static int dpaa_fq_free(struct device *dev, struct list_head *list)
1147 struct dpaa_fq *dpaa_fq, *tmp;
1151 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1152 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1153 if (error < 0 && err >= 0)
1160 static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1161 struct dpaa_fq *defq,
1162 struct dpaa_buffer_layout *buf_layout)
1164 struct fman_buffer_prefix_content buf_prefix_content;
1165 struct fman_port_params params;
1168 memset(&params, 0, sizeof(params));
1169 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1171 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1172 buf_prefix_content.pass_prs_result = true;
1173 buf_prefix_content.pass_hash_result = true;
1174 buf_prefix_content.pass_time_stamp = true;
1175 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1177 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1178 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1180 err = fman_port_config(port, &params);
1182 pr_err("%s: fman_port_config failed\n", __func__);
1186 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1188 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1193 err = fman_port_init(port);
1195 pr_err("%s: fm_port_init failed\n", __func__);
1200 static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1201 size_t count, struct dpaa_fq *errq,
1202 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1203 struct dpaa_buffer_layout *buf_layout)
1205 struct fman_buffer_prefix_content buf_prefix_content;
1206 struct fman_port_rx_params *rx_p;
1207 struct fman_port_params params;
1210 memset(&params, 0, sizeof(params));
1211 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1213 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1214 buf_prefix_content.pass_prs_result = true;
1215 buf_prefix_content.pass_hash_result = true;
1216 buf_prefix_content.pass_time_stamp = true;
1217 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1219 rx_p = &params.specific_params.rx_params;
1220 rx_p->err_fqid = errq->fqid;
1221 rx_p->dflt_fqid = defq->fqid;
1223 rx_p->pcd_base_fqid = pcdq->fqid;
1224 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1227 count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
1228 rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
1229 for (i = 0; i < count; i++) {
1230 rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
1231 rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
1234 err = fman_port_config(port, &params);
1236 pr_err("%s: fman_port_config failed\n", __func__);
1240 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1242 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1247 err = fman_port_init(port);
1249 pr_err("%s: fm_port_init failed\n", __func__);
1254 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1255 struct dpaa_bp **bps, size_t count,
1256 struct fm_port_fqs *port_fqs,
1257 struct dpaa_buffer_layout *buf_layout,
1260 struct fman_port *rxport = mac_dev->port[RX];
1261 struct fman_port *txport = mac_dev->port[TX];
1264 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1265 port_fqs->tx_defq, &buf_layout[TX]);
1269 err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1270 port_fqs->rx_defq, port_fqs->rx_pcdq,
1276 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1277 struct bm_buffer *bmb, int cnt)
1281 err = bman_release(dpaa_bp->pool, bmb, cnt);
1282 /* Should never occur, address anyway to avoid leaking the buffers */
1283 if (WARN_ON(err) && dpaa_bp->free_buf_cb)
1285 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1290 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1292 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1293 struct dpaa_bp *dpaa_bp;
1296 memset(bmb, 0, sizeof(bmb));
1299 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1305 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1307 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1310 } while (j < ARRAY_SIZE(bmb) &&
1311 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1312 sgt[i - 1].bpid == sgt[i].bpid);
1314 dpaa_bman_release(dpaa_bp, bmb, j);
1315 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1318 static void dpaa_fd_release(const struct net_device *net_dev,
1319 const struct qm_fd *fd)
1321 struct qm_sg_entry *sgt;
1322 struct dpaa_bp *dpaa_bp;
1323 struct bm_buffer bmb;
1328 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1330 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1334 if (qm_fd_get_format(fd) == qm_fd_sg) {
1335 vaddr = phys_to_virt(qm_fd_addr(fd));
1336 sgt = vaddr + qm_fd_get_offset(fd);
1338 dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1341 dpaa_release_sgt_members(sgt);
1343 addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1345 if (dma_mapping_error(dpaa_bp->dev, addr)) {
1346 dev_err(dpaa_bp->dev, "DMA mapping failed");
1349 bm_buffer_set64(&bmb, addr);
1352 dpaa_bman_release(dpaa_bp, &bmb, 1);
1355 static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1356 const union qm_mr_entry *msg)
1358 switch (msg->ern.rc & QM_MR_RC_MASK) {
1359 case QM_MR_RC_CGR_TAILDROP:
1360 percpu_priv->ern_cnt.cg_tdrop++;
1363 percpu_priv->ern_cnt.wred++;
1365 case QM_MR_RC_ERROR:
1366 percpu_priv->ern_cnt.err_cond++;
1368 case QM_MR_RC_ORPWINDOW_EARLY:
1369 percpu_priv->ern_cnt.early_window++;
1371 case QM_MR_RC_ORPWINDOW_LATE:
1372 percpu_priv->ern_cnt.late_window++;
1374 case QM_MR_RC_FQ_TAILDROP:
1375 percpu_priv->ern_cnt.fq_tdrop++;
1377 case QM_MR_RC_ORPWINDOW_RETIRED:
1378 percpu_priv->ern_cnt.fq_retired++;
1380 case QM_MR_RC_ORP_ZERO:
1381 percpu_priv->ern_cnt.orp_zero++;
1386 /* Turn on HW checksum computation for this outgoing frame.
1387 * If the current protocol is not something we support in this regard
1388 * (or if the stack has already computed the SW checksum), we do nothing.
1390 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1393 * Note that this function may modify the fd->cmd field and the skb data buffer
1394 * (the Parse Results area).
1396 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1397 struct sk_buff *skb,
1399 char *parse_results)
1401 struct fman_prs_result *parse_result;
1402 u16 ethertype = ntohs(skb->protocol);
1403 struct ipv6hdr *ipv6h = NULL;
1408 if (skb->ip_summed != CHECKSUM_PARTIAL)
1411 /* Note: L3 csum seems to be already computed in sw, but we can't choose
1412 * L4 alone from the FM configuration anyway.
1415 /* Fill in some fields of the Parse Results array, so the FMan
1416 * can find them as if they came from the FMan Parser.
1418 parse_result = (struct fman_prs_result *)parse_results;
1420 /* If we're dealing with VLAN, get the real Ethernet type */
1421 if (ethertype == ETH_P_8021Q) {
1422 /* We can't always assume the MAC header is set correctly
1423 * by the stack, so reset to beginning of skb->data
1425 skb_reset_mac_header(skb);
1426 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1429 /* Fill in the relevant L3 parse result fields
1430 * and read the L4 protocol type
1432 switch (ethertype) {
1434 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1437 l4_proto = iph->protocol;
1440 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1441 ipv6h = ipv6_hdr(skb);
1443 l4_proto = ipv6h->nexthdr;
1446 /* We shouldn't even be here */
1447 if (net_ratelimit())
1448 netif_alert(priv, tx_err, priv->net_dev,
1449 "Can't compute HW csum for L3 proto 0x%x\n",
1450 ntohs(skb->protocol));
1455 /* Fill in the relevant L4 parse result fields */
1458 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1461 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1464 if (net_ratelimit())
1465 netif_alert(priv, tx_err, priv->net_dev,
1466 "Can't compute HW csum for L4 proto 0x%x\n",
1472 /* At index 0 is IPOffset_1 as defined in the Parse Results */
1473 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1474 parse_result->l4_off = (u8)skb_transport_offset(skb);
1476 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1477 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1479 /* On P1023 and similar platforms fd->cmd interpretation could
1480 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
1481 * is not set so we do not need to check; in the future, if/when
1482 * using context_a we need to check this bit
1489 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1491 struct device *dev = dpaa_bp->dev;
1492 struct bm_buffer bmb[8];
1497 for (i = 0; i < 8; i++) {
1498 new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
1499 if (unlikely(!new_buf)) {
1500 dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1502 goto release_previous_buffs;
1504 new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
1506 addr = dma_map_single(dev, new_buf,
1507 dpaa_bp->size, DMA_FROM_DEVICE);
1508 if (unlikely(dma_mapping_error(dev, addr))) {
1509 dev_err(dpaa_bp->dev, "DMA map failed");
1510 goto release_previous_buffs;
1514 bm_buffer_set64(&bmb[i], addr);
1518 return dpaa_bman_release(dpaa_bp, bmb, i);
1520 release_previous_buffs:
1521 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1523 bm_buffer_set64(&bmb[i], 0);
1524 /* Avoid releasing a completely null buffer; bman_release() requires
1525 * at least one buffer.
1533 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1537 /* Give each CPU an allotment of "config_count" buffers */
1538 for_each_possible_cpu(i) {
1539 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1542 /* Although we access another CPU's counters here
1543 * we do it at boot time so it is safe
1545 for (j = 0; j < dpaa_bp->config_count; j += 8)
1546 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1551 /* Add buffers/(pages) for Rx processing whenever bpool count falls below
1554 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1556 int count = *countptr;
1559 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1561 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1562 if (unlikely(!new_bufs)) {
1563 /* Avoid looping forever if we've temporarily
1564 * run out of memory. We'll try again at the
1570 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1573 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1580 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1582 struct dpaa_bp *dpaa_bp;
1586 for (i = 0; i < DPAA_BPS_NUM; i++) {
1587 dpaa_bp = priv->dpaa_bps[i];
1590 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1591 res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
1598 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
1599 * either contiguous frames or scatter/gather ones.
1600 * Skb freeing is not handled here.
1602 * This function may be called on error paths in the Tx function, so guard
1603 * against cases when not all fd relevant fields were filled in.
1605 * Return the skb backpointer, since for S/G frames the buffer containing it
1608 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1609 const struct qm_fd *fd)
1611 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1612 struct device *dev = priv->net_dev->dev.parent;
1613 struct skb_shared_hwtstamps shhwtstamps;
1614 dma_addr_t addr = qm_fd_addr(fd);
1615 const struct qm_sg_entry *sgt;
1616 struct sk_buff **skbh, *skb;
1620 skbh = (struct sk_buff **)phys_to_virt(addr);
1623 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1624 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1626 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
1628 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1629 skb_tstamp_tx(skb, &shhwtstamps);
1631 dev_warn(dev, "fman_port_get_tstamp failed!\n");
1635 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1636 nr_frags = skb_shinfo(skb)->nr_frags;
1637 dma_unmap_single(dev, addr,
1638 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1641 /* The sgt buffer has been allocated with netdev_alloc_frag(),
1644 sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
1646 /* sgt[0] is from lowmem, was dma_map_single()-ed */
1647 dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1648 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1650 /* remaining pages were mapped with skb_frag_dma_map() */
1651 for (i = 1; i <= nr_frags; i++) {
1652 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1654 dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1655 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1658 /* Free the page frag that we allocated on Tx */
1659 skb_free_frag(phys_to_virt(addr));
1661 dma_unmap_single(dev, addr,
1662 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1668 static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1670 /* The parser has run and performed L4 checksum validation.
1671 * We know there were no parser errors (and implicitly no
1672 * L4 csum error), otherwise we wouldn't be here.
1674 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1675 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1676 return CHECKSUM_UNNECESSARY;
1678 /* We're here because either the parser didn't run or the L4 checksum
1679 * was not verified. This may include the case of a UDP frame with
1680 * checksum zero or an L4 proto other than TCP/UDP
1682 return CHECKSUM_NONE;
1685 /* Build a linear skb around the received buffer.
1686 * We are guaranteed there is enough room at the end of the data buffer to
1687 * accommodate the shared info area of the skb.
1689 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1690 const struct qm_fd *fd)
1692 ssize_t fd_off = qm_fd_get_offset(fd);
1693 dma_addr_t addr = qm_fd_addr(fd);
1694 struct dpaa_bp *dpaa_bp;
1695 struct sk_buff *skb;
1698 vaddr = phys_to_virt(addr);
1699 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1701 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1705 skb = build_skb(vaddr, dpaa_bp->size +
1706 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1707 if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
1709 WARN_ON(fd_off != priv->rx_headroom);
1710 skb_reserve(skb, fd_off);
1711 skb_put(skb, qm_fd_get_length(fd));
1713 skb->ip_summed = rx_csum_offload(priv, fd);
1718 skb_free_frag(vaddr);
1722 /* Build an skb with the data of the first S/G entry in the linear portion and
1723 * the rest of the frame as skb fragments.
1725 * The page fragment holding the S/G Table is recycled here.
1727 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1728 const struct qm_fd *fd)
1730 ssize_t fd_off = qm_fd_get_offset(fd);
1731 dma_addr_t addr = qm_fd_addr(fd);
1732 const struct qm_sg_entry *sgt;
1733 struct page *page, *head_page;
1734 struct dpaa_bp *dpaa_bp;
1735 void *vaddr, *sg_vaddr;
1736 int frag_off, frag_len;
1737 struct sk_buff *skb;
1744 vaddr = phys_to_virt(addr);
1745 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1747 /* Iterate through the SGT entries and add data buffers to the skb */
1748 sgt = vaddr + fd_off;
1750 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1751 /* Extension bit is not supported */
1752 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1754 sg_addr = qm_sg_addr(&sgt[i]);
1755 sg_vaddr = phys_to_virt(sg_addr);
1756 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
1759 /* We may use multiple Rx pools */
1760 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1764 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1765 dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1768 sz = dpaa_bp->size +
1769 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1770 skb = build_skb(sg_vaddr, sz);
1774 skb->ip_summed = rx_csum_offload(priv, fd);
1776 /* Make sure forwarded skbs will have enough space
1777 * on Tx, if extra headers are added.
1779 WARN_ON(fd_off != priv->rx_headroom);
1780 skb_reserve(skb, fd_off);
1781 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1783 /* Not the first S/G entry; all data from buffer will
1784 * be added in an skb fragment; fragment index is offset
1785 * by one since first S/G entry was incorporated in the
1786 * linear part of the skb.
1788 * Caution: 'page' may be a tail page.
1790 page = virt_to_page(sg_vaddr);
1791 head_page = virt_to_head_page(sg_vaddr);
1793 /* Compute offset in (possibly tail) page */
1794 page_offset = ((unsigned long)sg_vaddr &
1796 (page_address(page) - page_address(head_page));
1797 /* page_offset only refers to the beginning of sgt[i];
1798 * but the buffer itself may have an internal offset.
1800 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1801 frag_len = qm_sg_entry_get_len(&sgt[i]);
1802 /* skb_add_rx_frag() does no checking on the page; if
1803 * we pass it a tail page, we'll end up with
1804 * bad page accounting and eventually with segfaults.
1806 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1807 frag_len, dpaa_bp->size);
1809 /* Update the pool count for the current {cpu x bpool} */
1812 if (qm_sg_entry_is_final(&sgt[i]))
1815 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1817 /* free the SG table buffer */
1818 skb_free_frag(vaddr);
1823 /* compensate sw bpool counter changes */
1824 for (i--; i >= 0; i--) {
1825 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1827 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1831 /* free all the SG entries */
1832 for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
1833 sg_addr = qm_sg_addr(&sgt[i]);
1834 sg_vaddr = phys_to_virt(sg_addr);
1835 skb_free_frag(sg_vaddr);
1836 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1838 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1842 if (qm_sg_entry_is_final(&sgt[i]))
1845 /* free the SGT fragment */
1846 skb_free_frag(vaddr);
1851 static int skb_to_contig_fd(struct dpaa_priv *priv,
1852 struct sk_buff *skb, struct qm_fd *fd,
1855 struct net_device *net_dev = priv->net_dev;
1856 struct device *dev = net_dev->dev.parent;
1857 enum dma_data_direction dma_dir;
1858 unsigned char *buffer_start;
1859 struct sk_buff **skbh;
1863 /* We are guaranteed to have at least tx_headroom bytes
1864 * available, so just use that for offset.
1866 fd->bpid = FSL_DPAA_BPID_INV;
1867 buffer_start = skb->data - priv->tx_headroom;
1868 dma_dir = DMA_TO_DEVICE;
1870 skbh = (struct sk_buff **)buffer_start;
1873 /* Enable L3/L4 hardware checksum computation.
1875 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1876 * need to write into the skb.
1878 err = dpaa_enable_tx_csum(priv, skb, fd,
1879 ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
1880 if (unlikely(err < 0)) {
1881 if (net_ratelimit())
1882 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1887 /* Fill in the rest of the FD fields */
1888 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1889 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1891 /* Map the entire buffer size that may be seen by FMan, but no more */
1892 addr = dma_map_single(dev, skbh,
1893 skb_tail_pointer(skb) - buffer_start, dma_dir);
1894 if (unlikely(dma_mapping_error(dev, addr))) {
1895 if (net_ratelimit())
1896 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1899 qm_fd_addr_set64(fd, addr);
1904 static int skb_to_sg_fd(struct dpaa_priv *priv,
1905 struct sk_buff *skb, struct qm_fd *fd)
1907 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1908 const int nr_frags = skb_shinfo(skb)->nr_frags;
1909 struct net_device *net_dev = priv->net_dev;
1910 struct device *dev = net_dev->dev.parent;
1911 struct qm_sg_entry *sgt;
1912 struct sk_buff **skbh;
1920 /* get a page frag to store the SGTable */
1921 sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
1922 sgt_buf = netdev_alloc_frag(sz);
1923 if (unlikely(!sgt_buf)) {
1924 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
1929 /* Enable L3/L4 hardware checksum computation.
1931 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1932 * need to write into the skb.
1934 err = dpaa_enable_tx_csum(priv, skb, fd,
1935 sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
1936 if (unlikely(err < 0)) {
1937 if (net_ratelimit())
1938 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1943 /* SGT[0] is used by the linear part */
1944 sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
1945 frag_len = skb_headlen(skb);
1946 qm_sg_entry_set_len(&sgt[0], frag_len);
1947 sgt[0].bpid = FSL_DPAA_BPID_INV;
1949 addr = dma_map_single(dev, skb->data,
1950 skb_headlen(skb), dma_dir);
1951 if (unlikely(dma_mapping_error(dev, addr))) {
1952 dev_err(dev, "DMA mapping failed");
1954 goto sg0_map_failed;
1956 qm_sg_entry_set64(&sgt[0], addr);
1958 /* populate the rest of SGT entries */
1959 for (i = 0; i < nr_frags; i++) {
1960 frag = &skb_shinfo(skb)->frags[i];
1961 frag_len = frag->size;
1962 WARN_ON(!skb_frag_page(frag));
1963 addr = skb_frag_dma_map(dev, frag, 0,
1965 if (unlikely(dma_mapping_error(dev, addr))) {
1966 dev_err(dev, "DMA mapping failed");
1971 qm_sg_entry_set_len(&sgt[i + 1], frag_len);
1972 sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
1973 sgt[i + 1].offset = 0;
1975 /* keep the offset in the address */
1976 qm_sg_entry_set64(&sgt[i + 1], addr);
1979 /* Set the final bit in the last used entry of the SGT */
1980 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
1982 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
1984 /* DMA map the SGT page */
1985 buffer_start = (void *)sgt - priv->tx_headroom;
1986 skbh = (struct sk_buff **)buffer_start;
1989 addr = dma_map_single(dev, buffer_start,
1990 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
1991 if (unlikely(dma_mapping_error(dev, addr))) {
1992 dev_err(dev, "DMA mapping failed");
1994 goto sgt_map_failed;
1997 fd->bpid = FSL_DPAA_BPID_INV;
1998 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1999 qm_fd_addr_set64(fd, addr);
2005 for (j = 0; j < i; j++)
2006 dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
2007 qm_sg_entry_get_len(&sgt[j]), dma_dir);
2010 skb_free_frag(sgt_buf);
2015 static inline int dpaa_xmit(struct dpaa_priv *priv,
2016 struct rtnl_link_stats64 *percpu_stats,
2020 struct qman_fq *egress_fq;
2023 egress_fq = priv->egress_fqs[queue];
2024 if (fd->bpid == FSL_DPAA_BPID_INV)
2025 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2027 /* Trace this Tx fd */
2028 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2030 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2031 err = qman_enqueue(egress_fq, fd);
2036 if (unlikely(err < 0)) {
2037 percpu_stats->tx_fifo_errors++;
2041 percpu_stats->tx_packets++;
2042 percpu_stats->tx_bytes += qm_fd_get_length(fd);
2048 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2050 const int queue_mapping = skb_get_queue_mapping(skb);
2051 bool nonlinear = skb_is_nonlinear(skb);
2052 struct rtnl_link_stats64 *percpu_stats;
2053 struct dpaa_percpu_priv *percpu_priv;
2054 struct netdev_queue *txq;
2055 struct dpaa_priv *priv;
2060 priv = netdev_priv(net_dev);
2061 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2062 percpu_stats = &percpu_priv->stats;
2064 qm_fd_clear_fd(&fd);
2067 /* We're going to store the skb backpointer at the beginning
2068 * of the data buffer, so we need a privately owned skb
2070 * We've made sure the skb is not shared in dev->priv_flags,
2071 * but we still need to verify that the skb head is not cloned.
 */
2073 if (skb_cow_head(skb, priv->tx_headroom))
2076 WARN_ON(skb_is_nonlinear(skb));
2079 /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
2080 * make sure we don't feed FMan with more fragments than it supports.
2082 if (unlikely(nonlinear &&
2083 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2084 /* If the egress skb contains more fragments than we support
2085 * we have no choice but to linearize it ourselves.
2087 if (__skb_linearize(skb))
2090 nonlinear = skb_is_nonlinear(skb);
2094 /* Just create an S/G fd based on the skb */
2095 err = skb_to_sg_fd(priv, skb, &fd);
2096 percpu_priv->tx_frag_skbuffs++;
2098 /* Create a contig FD from this skb */
2099 err = skb_to_contig_fd(priv, skb, &fd, &offset);
2101 if (unlikely(err < 0))
2102 goto skb_to_fd_failed;
2104 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2106 /* LLTX requires us to do our own update of trans_start */
2107 txq->trans_start = jiffies;
2109 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2110 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2111 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2114 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2115 return NETDEV_TX_OK;
2117 dpaa_cleanup_tx_fd(priv, &fd);
2120 percpu_stats->tx_errors++;
2122 return NETDEV_TX_OK;
2125 static void dpaa_rx_error(struct net_device *net_dev,
2126 const struct dpaa_priv *priv,
2127 struct dpaa_percpu_priv *percpu_priv,
2128 const struct qm_fd *fd,
2131 if (net_ratelimit())
2132 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2133 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2135 percpu_priv->stats.rx_errors++;
2137 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2138 percpu_priv->rx_errors.dme++;
2139 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2140 percpu_priv->rx_errors.fpe++;
2141 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2142 percpu_priv->rx_errors.fse++;
2143 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2144 percpu_priv->rx_errors.phe++;
2146 dpaa_fd_release(net_dev, fd);
2149 static void dpaa_tx_error(struct net_device *net_dev,
2150 const struct dpaa_priv *priv,
2151 struct dpaa_percpu_priv *percpu_priv,
2152 const struct qm_fd *fd,
2155 struct sk_buff *skb;
2157 if (net_ratelimit())
2158 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2159 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2161 percpu_priv->stats.tx_errors++;
2163 skb = dpaa_cleanup_tx_fd(priv, fd);
2167 static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2169 struct dpaa_napi_portal *np =
2170 container_of(napi, struct dpaa_napi_portal, napi);
2172 int cleaned = qman_p_poll_dqrr(np->p, budget);
2174 if (cleaned < budget) {
2175 napi_complete_done(napi, cleaned);
2176 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2177 } else if (np->down) {
2178 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2184 static void dpaa_tx_conf(struct net_device *net_dev,
2185 const struct dpaa_priv *priv,
2186 struct dpaa_percpu_priv *percpu_priv,
2187 const struct qm_fd *fd,
2190 struct sk_buff *skb;
2192 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2193 if (net_ratelimit())
2194 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2195 be32_to_cpu(fd->status) &
2196 FM_FD_STAT_TX_ERRORS);
2198 percpu_priv->stats.tx_errors++;
2201 percpu_priv->tx_confirm++;
2203 skb = dpaa_cleanup_tx_fd(priv, fd);
2208 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2209 struct qman_portal *portal)
2211 if (unlikely(in_irq() || !in_serving_softirq())) {
2212 /* Disable QMan IRQ and invoke NAPI */
2213 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2215 percpu_priv->np.p = portal;
2216 napi_schedule(&percpu_priv->np.napi);
2217 percpu_priv->in_interrupt++;
2223 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2225 const struct qm_dqrr_entry *dq)
2227 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2228 struct dpaa_percpu_priv *percpu_priv;
2229 struct net_device *net_dev;
2230 struct dpaa_bp *dpaa_bp;
2231 struct dpaa_priv *priv;
2233 net_dev = dpaa_fq->net_dev;
2234 priv = netdev_priv(net_dev);
2235 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2237 return qman_cb_dqrr_consume;
2239 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2241 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2242 return qman_cb_dqrr_stop;
2244 dpaa_eth_refill_bpools(priv);
2245 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2247 return qman_cb_dqrr_consume;
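/* DQRR callback for the default Rx FQ: build an skb from the frame descriptor
 * (contiguous or scatter/gather), optionally attach the hardware timestamp and
 * the FMan KeyGen hash result, then hand the packet to the network stack.
 */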
2250 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2251 struct qman_fq *fq,
2252 const struct qm_dqrr_entry *dq)
2253 {
2254 struct skb_shared_hwtstamps *shhwtstamps;
2255 struct rtnl_link_stats64 *percpu_stats;
2256 struct dpaa_percpu_priv *percpu_priv;
2257 const struct qm_fd *fd = &dq->fd;
2258 dma_addr_t addr = qm_fd_addr(fd);
2259 enum qm_fd_format fd_format;
2260 struct net_device *net_dev;
2261 u32 fd_status, hash_offset;
2262 struct dpaa_bp *dpaa_bp;
2263 struct dpaa_priv *priv;
2264 unsigned int skb_len;
2265 struct sk_buff *skb;
2266 int *count_ptr;
2267 void *vaddr;
2268 u64 ns;
2270 fd_status = be32_to_cpu(fd->status);
2271 fd_format = qm_fd_get_format(fd);
2272 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2273 priv = netdev_priv(net_dev);
2274 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2275 if (!dpaa_bp)
2276 return qman_cb_dqrr_consume;
2278 /* Trace the Rx fd */
2279 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2281 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2282 percpu_stats = &percpu_priv->stats;
2284 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
2285 return qman_cb_dqrr_stop;
2287 /* Make sure we didn't run out of buffers */
2288 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2289 /* Unable to refill the buffer pool due to insufficient
2290 * system memory. Just release the frame back into the pool,
2291 * otherwise we'll soon end up with an empty buffer pool.
2292 */
2293 dpaa_fd_release(net_dev, &dq->fd);
2294 return qman_cb_dqrr_consume;
2295 }
2297 if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
2298 if (net_ratelimit())
2299 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2300 fd_status & FM_FD_STAT_RX_ERRORS);
2302 percpu_stats->rx_errors++;
2303 dpaa_fd_release(net_dev, fd);
2304 return qman_cb_dqrr_consume;
2305 }
2307 dpaa_bp = dpaa_bpid2pool(fd->bpid);
2308 if (!dpaa_bp)
2309 return qman_cb_dqrr_consume;
2311 dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2313 /* prefetch the first 64 bytes of the frame or the SGT start */
2314 vaddr = phys_to_virt(addr);
2315 prefetch(vaddr + qm_fd_get_offset(fd));
2317 /* The only FD types that we may receive are contig and S/G */
2318 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2320 /* Account for either the contig buffer or the SGT buffer (depending on
2321 * which case we were in) having been removed from the pool.
2322 */
2323 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2324 (*count_ptr)--;
2326 if (likely(fd_format == qm_fd_contig))
2327 skb = contig_fd_to_skb(priv, fd);
2328 else
2329 skb = sg_fd_to_skb(priv, fd);
2330 if (!skb)
2331 return qman_cb_dqrr_consume;
2333 if (priv->rx_tstamp) {
2334 shhwtstamps = skb_hwtstamps(skb);
2335 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2337 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
2338 shhwtstamps->hwtstamp = ns_to_ktime(ns);
2339 else
2340 dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
2341 }
2343 skb->protocol = eth_type_trans(skb, net_dev);
2345 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2346 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2347 &hash_offset)) {
2348 enum pkt_hash_types type;
2350 /* if L4 exists, it was used in the hash generation */
2351 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2352 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2353 skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
2354 type);
2355 }
2357 skb_len = skb->len;
2359 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2360 percpu_stats->rx_dropped++;
2361 return qman_cb_dqrr_consume;
2362 }
2364 percpu_stats->rx_packets++;
2365 percpu_stats->rx_bytes += skb_len;
2367 return qman_cb_dqrr_consume;
2370 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2371 struct qman_fq *fq,
2372 const struct qm_dqrr_entry *dq)
2373 {
2374 struct dpaa_percpu_priv *percpu_priv;
2375 struct net_device *net_dev;
2376 struct dpaa_priv *priv;
2378 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2379 priv = netdev_priv(net_dev);
2381 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2383 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2384 return qman_cb_dqrr_stop;
2386 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2388 return qman_cb_dqrr_consume;
2391 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2392 struct qman_fq *fq,
2393 const struct qm_dqrr_entry *dq)
2394 {
2395 struct dpaa_percpu_priv *percpu_priv;
2396 struct net_device *net_dev;
2397 struct dpaa_priv *priv;
2399 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2400 priv = netdev_priv(net_dev);
2403 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2405 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2407 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2408 return qman_cb_dqrr_stop;
2410 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2412 return qman_cb_dqrr_consume;
2415 static void egress_ern(struct qman_portal *portal,
2416 struct qman_fq *fq,
2417 const union qm_mr_entry *msg)
2418 {
2419 const struct qm_fd *fd = &msg->ern.fd;
2420 struct dpaa_percpu_priv *percpu_priv;
2421 const struct dpaa_priv *priv;
2422 struct net_device *net_dev;
2423 struct sk_buff *skb;
2425 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2426 priv = netdev_priv(net_dev);
2427 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2429 percpu_priv->stats.tx_dropped++;
2430 percpu_priv->stats.tx_fifo_errors++;
2431 count_ern(percpu_priv, msg);
2433 skb = dpaa_cleanup_tx_fd(priv, fd);
2434 dev_kfree_skb_any(skb);
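/* Frame queue callbacks wired to the default and error Rx/Tx confirmation
 * queues, plus the egress ERN (enqueue rejection) handler.
 */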
2437 static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2438 .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2439 .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2440 .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2441 .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2442 .egress_ern = { .cb = { .ern = egress_ern } }
2443 };
2445 static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2447 struct dpaa_percpu_priv *percpu_priv;
2450 for_each_online_cpu(i) {
2451 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2453 percpu_priv->np.down = 0;
2454 napi_enable(&percpu_priv->np.napi);
2458 static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2460 struct dpaa_percpu_priv *percpu_priv;
2463 for_each_online_cpu(i) {
2464 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2466 percpu_priv->np.down = 1;
2467 napi_disable(&percpu_priv->np.napi);
2471 static void dpaa_adjust_link(struct net_device *net_dev)
2473 struct mac_device *mac_dev;
2474 struct dpaa_priv *priv;
2476 priv = netdev_priv(net_dev);
2477 mac_dev = priv->mac_dev;
2478 mac_dev->adjust_link(mac_dev);
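/* Connect to the PHY described by the MAC's device tree node and mask out
 * the link modes the controller cannot support.
 */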
2481 static int dpaa_phy_init(struct net_device *net_dev)
2483 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2484 struct mac_device *mac_dev;
2485 struct phy_device *phy_dev;
2486 struct dpaa_priv *priv;
2488 priv = netdev_priv(net_dev);
2489 mac_dev = priv->mac_dev;
2491 phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
2492 &dpaa_adjust_link, 0,
2493 mac_dev->phy_if);
2494 if (!phy_dev) {
2495 netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2496 return -ENODEV;
2497 }
2499 /* Remove any features not supported by the controller */
2500 ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
2501 linkmode_and(phy_dev->supported, phy_dev->supported, mask);
2503 phy_support_asym_pause(phy_dev);
2505 mac_dev->phy_dev = phy_dev;
2506 net_dev->phydev = phy_dev;
2508 return 0;
2509 }
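/* ndo_open: enable NAPI, connect the PHY, enable the FMan ports and start the
 * MAC before waking the Tx queues; unwind on any failure.
 */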
2511 static int dpaa_open(struct net_device *net_dev)
2513 struct mac_device *mac_dev;
2514 struct dpaa_priv *priv;
2515 int err, i;
2517 priv = netdev_priv(net_dev);
2518 mac_dev = priv->mac_dev;
2519 dpaa_eth_napi_enable(priv);
2521 err = dpaa_phy_init(net_dev);
2522 if (err)
2523 goto phy_init_failed;
2525 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2526 err = fman_port_enable(mac_dev->port[i]);
2527 if (err)
2528 goto mac_start_failed;
2529 }
2531 err = priv->mac_dev->start(mac_dev);
2532 if (err < 0) {
2533 netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
2534 goto mac_start_failed;
2535 }
2537 netif_tx_start_all_queues(net_dev);
2539 return 0;
2541 mac_start_failed:
2542 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2543 fman_port_disable(mac_dev->port[i]);
2545 phy_init_failed:
2546 dpaa_eth_napi_disable(priv);
2548 return err;
2549 }
2551 static int dpaa_eth_stop(struct net_device *net_dev)
2552 {
2553 struct dpaa_priv *priv;
2554 int err;
2556 err = dpaa_stop(net_dev);
2558 priv = netdev_priv(net_dev);
2559 dpaa_eth_napi_disable(priv);
2561 return err;
2562 }
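/* SIOCSHWTSTAMP handler. Timestamping is enabled globally in the FMan MAC, so
 * Rx and Tx timestamping cannot be switched off independently of each other;
 * e.g. a request with tx_type == HWTSTAMP_TX_ON and rx_filter !=
 * HWTSTAMP_FILTER_NONE turns on timestamps in both directions.
 */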
2564 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2566 struct dpaa_priv *priv = netdev_priv(dev);
2567 struct hwtstamp_config config;
2569 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2570 return -EFAULT;
2572 switch (config.tx_type) {
2573 case HWTSTAMP_TX_OFF:
2574 /* Rx and Tx timestamping can't be disabled independently;
2575 * do nothing here.
2576 */
2577 priv->tx_tstamp = false;
2578 break;
2579 case HWTSTAMP_TX_ON:
2580 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
2581 priv->tx_tstamp = true;
2582 break;
2583 default:
2584 return -ERANGE;
2585 }
2587 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2588 /* Rx and Tx timestamping can't be disabled independently;
2589 * do nothing here.
2590 */
2591 priv->rx_tstamp = false;
2592 } else {
2593 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
2594 priv->rx_tstamp = true;
2595 /* TS is set for all frame types, not only those requested */
2596 config.rx_filter = HWTSTAMP_FILTER_ALL;
2597 }
2599 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2600 -EFAULT : 0;
2601 }
2603 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2604 {
2605 int ret = -EINVAL;
2607 if (cmd == SIOCGMIIREG) {
2608 if (net_dev->phydev)
2609 return phy_mii_ioctl(net_dev->phydev, rq, cmd);
2610 }
2612 if (cmd == SIOCSHWTSTAMP)
2613 return dpaa_ts_ioctl(net_dev, rq, cmd);
2615 return ret;
2616 }
2618 static const struct net_device_ops dpaa_ops = {
2619 .ndo_open = dpaa_open,
2620 .ndo_start_xmit = dpaa_start_xmit,
2621 .ndo_stop = dpaa_eth_stop,
2622 .ndo_tx_timeout = dpaa_tx_timeout,
2623 .ndo_get_stats64 = dpaa_get_stats64,
2624 .ndo_change_carrier = fixed_phy_change_carrier,
2625 .ndo_set_mac_address = dpaa_set_mac_address,
2626 .ndo_validate_addr = eth_validate_addr,
2627 .ndo_set_rx_mode = dpaa_set_rx_mode,
2628 .ndo_do_ioctl = dpaa_ioctl,
2629 .ndo_setup_tc = dpaa_setup_tc,
2630 };
2632 static int dpaa_napi_add(struct net_device *net_dev)
2634 struct dpaa_priv *priv = netdev_priv(net_dev);
2635 struct dpaa_percpu_priv *percpu_priv;
2638 for_each_possible_cpu(cpu) {
2639 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2641 netif_napi_add(net_dev, &percpu_priv->np.napi,
2642 dpaa_eth_poll, NAPI_POLL_WEIGHT);
2648 static void dpaa_napi_del(struct net_device *net_dev)
2650 struct dpaa_priv *priv = netdev_priv(net_dev);
2651 struct dpaa_percpu_priv *percpu_priv;
2654 for_each_possible_cpu(cpu) {
2655 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2657 netif_napi_del(&percpu_priv->np.napi);
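/* Free-buffer callback: unmap a buffer drained from the BMan pool and release
 * the underlying page fragment.
 */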
2661 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
2662 struct bm_buffer *bmb)
2664 dma_addr_t addr = bm_buf_addr(bmb);
2666 dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
2668 skb_free_frag(phys_to_virt(addr));
2671 /* Alloc the dpaa_bp struct and configure default values */
2672 static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
2674 struct dpaa_bp *dpaa_bp;
2676 dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
2677 if (!dpaa_bp)
2678 return ERR_PTR(-ENOMEM);
2680 dpaa_bp->bpid = FSL_DPAA_BPID_INV;
2681 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
2682 if (!dpaa_bp->percpu_count)
2683 return ERR_PTR(-ENOMEM);
2685 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
2687 dpaa_bp->seed_cb = dpaa_bp_seed;
2688 dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
2690 return dpaa_bp;
2691 }
2693 /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
2694 * We won't be sending congestion notifications to FMan; for now, we just use
2695 * this CGR to generate enqueue rejections to FMan in order to drop the frames
2696 * before they reach our ingress queues and eat up memory.
2697 */
2698 static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
2699 {
2700 struct qm_mcc_initcgr initcgr;
2701 u32 cs_th;
2702 int err;
2704 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2705 if (err < 0) {
2706 if (netif_msg_drv(priv))
2707 pr_err("Error %d allocating CGR ID\n", err);
2708 goto out_error;
2709 }
2711 /* Enable CS TD, but disable Congestion State Change Notifications. */
2712 memset(&initcgr, 0, sizeof(initcgr));
2713 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
2714 initcgr.cgr.cscn_en = QM_CGR_EN;
2715 cs_th = DPAA_INGRESS_CS_THRESHOLD;
2716 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2718 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
2719 initcgr.cgr.cstd_en = QM_CGR_EN;
2721 /* This CGR will be associated with the SWP affined to the current CPU.
2722 * However, we'll place all our ingress FQs in it.
2723 */
2724 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2725 &initcgr);
2726 if (err < 0) {
2727 if (netif_msg_drv(priv))
2728 pr_err("Error %d creating ingress CGR with ID %d\n",
2729 err, priv->ingress_cgr.cgrid);
2730 qman_release_cgrid(priv->ingress_cgr.cgrid);
2731 goto out_error;
2732 }
2733 if (netif_msg_drv(priv))
2734 pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2735 priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2737 priv->use_ingress_cgr = true;
2739 out_error:
2740 return err;
2741 }
2743 static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
2744 {
2745 u16 headroom;
2747 /* The frame headroom must accommodate:
2748 * - the driver private data area
2749 * - parse results, hash results, timestamp if selected
2750 * If either hash results or time stamp are selected, both will
2751 * be copied to/from the frame headroom, as TS is located between PR and
2752 * HR in the IC and IC copy size has a granularity of 16 bytes
2753 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM).
2755 * Also make sure the headroom is a multiple of data_align bytes.
2756 */
2757 headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
2758 DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
2760 return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
2761 DPAA_FD_DATA_ALIGNMENT) :
2762 headroom;
2763 }
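/* Probe one DPAA Ethernet interface: set up the DMA mask, buffer pools, frame
 * queues, congestion groups and per-CPU state, then register the net device.
 */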
2765 static int dpaa_eth_probe(struct platform_device *pdev)
2767 struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
2768 struct net_device *net_dev = NULL;
2769 struct dpaa_fq *dpaa_fq, *tmp;
2770 struct dpaa_priv *priv = NULL;
2771 struct fm_port_fqs port_fqs;
2772 struct mac_device *mac_dev;
2773 int err = 0, i, channel;
2774 struct device *dev;
2776 /* device used for DMA mapping */
2777 dev = pdev->dev.parent;
2778 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
2779 if (err) {
2780 dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
2781 return err;
2782 }
2784 /* Allocate this early, so we can store relevant information in
2785 * the private area
2786 */
2787 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
2788 if (!net_dev) {
2789 dev_err(dev, "alloc_etherdev_mq() failed\n");
2790 return -ENOMEM;
2791 }
2793 /* Do this here, so we can be verbose early */
2794 SET_NETDEV_DEV(net_dev, dev);
2795 dev_set_drvdata(dev, net_dev);
2797 priv = netdev_priv(net_dev);
2798 priv->net_dev = net_dev;
2800 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
2802 mac_dev = dpaa_mac_dev_get(pdev);
2803 if (IS_ERR(mac_dev)) {
2804 dev_err(dev, "dpaa_mac_dev_get() failed\n");
2805 err = PTR_ERR(mac_dev);
2806 goto free_netdev;
2807 }
2809 /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
2810 * we choose conservatively and let the user explicitly set a higher
2811 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
2812 * in the same LAN.
2813 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
2814 * start with the maximum allowed.
2815 */
2816 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
2818 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
2819 net_dev->mtu);
2821 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
2822 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
2825 for (i = 0; i < DPAA_BPS_NUM; i++) {
2826 dpaa_bps[i] = dpaa_bp_alloc(dev);
2827 if (IS_ERR(dpaa_bps[i])) {
2828 err = PTR_ERR(dpaa_bps[i]);
2829 goto free_dpaa_bps;
2830 }
2831 /* the raw size of the buffers used for reception */
2832 dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
2833 /* avoid runtime computations by keeping the usable size here */
2834 dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
2835 dpaa_bps[i]->dev = dev;
2837 err = dpaa_bp_alloc_pool(dpaa_bps[i]);
2838 if (err < 0)
2839 goto free_dpaa_bps;
2840 priv->dpaa_bps[i] = dpaa_bps[i];
2841 }
2843 INIT_LIST_HEAD(&priv->dpaa_fq_list);
2845 memset(&port_fqs, 0, sizeof(port_fqs));
2847 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
2848 if (err < 0) {
2849 dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
2850 goto free_dpaa_bps;
2851 }
2853 priv->mac_dev = mac_dev;
2855 channel = dpaa_get_channel();
2856 if (channel < 0) {
2857 dev_err(dev, "dpaa_get_channel() failed\n");
2858 err = channel;
2859 goto free_dpaa_bps;
2860 }
2862 priv->channel = (u16)channel;
2864 /* Walk the CPUs with affine portals
2865 * and add this pool channel to each CPU's dequeue mask.
2866 */
2867 dpaa_eth_add_channel(priv->channel);
2869 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
2871 /* Create a congestion group for this netdev, with
2872 * dynamically-allocated CGR ID.
2873 * Must be executed after probing the MAC, but before
2874 * assigning the egress FQs to the CGRs.
2875 */
2876 err = dpaa_eth_cgr_init(priv);
2877 if (err < 0) {
2878 dev_err(dev, "Error initializing CGR\n");
2879 goto free_dpaa_bps;
2880 }
2882 err = dpaa_ingress_cgr_init(priv);
2883 if (err < 0) {
2884 dev_err(dev, "Error initializing ingress CGR\n");
2885 goto delete_egress_cgr;
2886 }
2888 /* Add the FQs to the interface, and make them active */
2889 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
2890 err = dpaa_fq_init(dpaa_fq, false);
2891 if (err < 0)
2892 goto free_dpaa_fqs;
2893 }
2895 priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
2896 priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
2898 /* All real interfaces need their ports initialized */
2899 err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
2900 &priv->buf_layout[0], dev);
2901 if (err)
2902 goto free_dpaa_fqs;
2904 /* Rx traffic distribution based on keygen hashing defaults to on */
2905 priv->keygen_in_use = true;
2907 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2908 if (!priv->percpu_priv) {
2909 dev_err(dev, "devm_alloc_percpu() failed\n");
2910 err = -ENOMEM;
2911 goto free_dpaa_fqs;
2912 }
2914 priv->num_tc = 1;
2915 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
2917 /* Initialize NAPI */
2918 err = dpaa_napi_add(net_dev);
2919 if (err < 0)
2920 goto delete_dpaa_napi;
2922 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
2923 if (err < 0)
2924 goto delete_dpaa_napi;
2926 dpaa_eth_sysfs_init(&net_dev->dev);
2928 netif_info(priv, probe, net_dev, "Probed interface %s\n",
2929 net_dev->name);
2931 return 0;
2933 delete_dpaa_napi:
2934 dpaa_napi_del(net_dev);
2935 free_dpaa_fqs:
2936 dpaa_fq_free(dev, &priv->dpaa_fq_list);
2937 qman_delete_cgr_safe(&priv->ingress_cgr);
2938 qman_release_cgrid(priv->ingress_cgr.cgrid);
2939 delete_egress_cgr:
2940 qman_delete_cgr_safe(&priv->cgr_data.cgr);
2941 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
2942 free_dpaa_bps:
2943 dpaa_bps_free(priv);
2944 free_netdev:
2945 dev_set_drvdata(dev, NULL);
2946 free_netdev(net_dev);
2948 return err;
2949 }
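/* Undo everything probe set up: unregister the netdev, free the frame queues,
 * congestion groups, NAPI instances and buffer pools.
 */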
2951 static int dpaa_remove(struct platform_device *pdev)
2953 struct net_device *net_dev;
2954 struct dpaa_priv *priv;
2955 struct device *dev;
2956 int err;
2958 dev = pdev->dev.parent;
2959 net_dev = dev_get_drvdata(dev);
2961 priv = netdev_priv(net_dev);
2963 dpaa_eth_sysfs_remove(dev);
2965 dev_set_drvdata(dev, NULL);
2966 unregister_netdev(net_dev);
2968 err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
2970 qman_delete_cgr_safe(&priv->ingress_cgr);
2971 qman_release_cgrid(priv->ingress_cgr.cgrid);
2972 qman_delete_cgr_safe(&priv->cgr_data.cgr);
2973 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
2975 dpaa_napi_del(net_dev);
2977 dpaa_bps_free(priv);
2979 free_netdev(net_dev);
2981 return err;
2982 }
2984 static const struct platform_device_id dpaa_devtype[] = {
2986 { .name = "dpaa-ethernet", .driver_data = 0, },
2989 { /* sentinel */ }
2990 };
2991 MODULE_DEVICE_TABLE(platform, dpaa_devtype);
2993 static struct platform_driver dpaa_driver = {
2994 .driver = {
2995 .name = KBUILD_MODNAME,
2996 },
2997 .id_table = dpaa_devtype,
2998 .probe = dpaa_eth_probe,
2999 .remove = dpaa_remove
3000 };
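/* Module init: cache the FMan-wide Rx extra headroom and maximum frame size
 * used throughout the driver, then register the platform driver.
 */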
3002 static int __init dpaa_load(void)
3003 {
3004 int err;
3006 pr_debug("FSL DPAA Ethernet driver\n");
3008 /* initialize dpaa_eth mirror values */
3009 dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3010 dpaa_max_frm = fman_get_max_frm();
3012 err = platform_driver_register(&dpaa_driver);
3013 if (err < 0)
3014 pr_err("Error, platform_driver_register() = %d\n", err);
3016 return err;
3017 }
3018 module_init(dpaa_load);
3020 static void __exit dpaa_unload(void)
3022 platform_driver_unregister(&dpaa_driver);
3024 /* Only one channel is used and needs to be released after all
3025 * interfaces are removed.
3026 */
3027 dpaa_release_channel();
3028 }
3029 module_exit(dpaa_unload);
3031 MODULE_LICENSE("Dual BSD/GPL");
3032 MODULE_DESCRIPTION("FSL DPAA Ethernet driver");