1 // SPDX-License-Identifier: GPL-2.0
3 * Texas Instruments Ethernet Switch Driver
5 * Copyright (C) 2012 Texas Instruments
9 #include <linux/kernel.h>
11 #include <linux/clk.h>
12 #include <linux/timer.h>
13 #include <linux/module.h>
14 #include <linux/platform_device.h>
15 #include <linux/irqreturn.h>
16 #include <linux/interrupt.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/netdevice.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/phy.h>
22 #include <linux/phy/phy.h>
23 #include <linux/workqueue.h>
24 #include <linux/delay.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/gpio/consumer.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/of_device.h>
31 #include <linux/if_vlan.h>
32 #include <linux/kmemleak.h>
33 #include <linux/sys_soc.h>
35 #include <linux/pinctrl/consumer.h>
36 #include <net/pkt_cls.h>
40 #include "cpsw_priv.h"
42 #include "davinci_cpdma.h"
44 #include <net/pkt_sched.h>
46 static int debug_level;
47 module_param(debug_level, int, 0);
48 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
50 static int ale_ageout = 10;
51 module_param(ale_ageout, int, 0);
52 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
54 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
55 module_param(rx_packet_max, int, 0);
56 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
58 static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
59 module_param(descs_pool_size, int, 0444);
60 MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
63 char stat_string[ETH_GSTRING_LEN];
75 #define CPSW_STAT(m) CPSW_STATS, \
76 FIELD_SIZEOF(struct cpsw_hw_stats, m), \
77 offsetof(struct cpsw_hw_stats, m)
78 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
79 FIELD_SIZEOF(struct cpdma_chan_stats, m), \
80 offsetof(struct cpdma_chan_stats, m)
81 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
82 FIELD_SIZEOF(struct cpdma_chan_stats, m), \
83 offsetof(struct cpdma_chan_stats, m)
85 static const struct cpsw_stats cpsw_gstrings_stats[] = {
86 { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
87 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
88 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
89 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
90 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
91 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
92 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
93 { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
94 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
95 { "Rx Fragments", CPSW_STAT(rxfragments) },
96 { "Rx Octets", CPSW_STAT(rxoctets) },
97 { "Good Tx Frames", CPSW_STAT(txgoodframes) },
98 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
99 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
100 { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
101 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
102 { "Collisions", CPSW_STAT(txcollisionframes) },
103 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
104 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
105 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
106 { "Late Collisions", CPSW_STAT(txlatecollisions) },
107 { "Tx Underrun", CPSW_STAT(txunderrun) },
108 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
109 { "Tx Octets", CPSW_STAT(txoctets) },
110 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
111 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
112 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
113 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
114 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
115 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
116 { "Net Octets", CPSW_STAT(netoctets) },
117 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
118 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
119 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
122 static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
123 { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
124 { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
125 { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
126 { "misqueued", CPDMA_RX_STAT(misqueued) },
127 { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
128 { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
129 { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
130 { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
131 { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
132 { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
133 { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
134 { "requeue", CPDMA_RX_STAT(requeue) },
135 { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
138 #define for_each_slave(priv, func, arg...) \
140 struct cpsw_slave *slave; \
141 struct cpsw_common *cpsw = (priv)->cpsw; \
143 if (cpsw->data.dual_emac) \
144 (func)((cpsw)->slaves + priv->emac_port, ##arg);\
146 for (n = cpsw->data.slaves, \
147 slave = cpsw->slaves; \
149 (func)(slave++, ##arg); \
152 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
153 __be16 proto, u16 vid);
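/* Enable or disable promiscuous mode. In dual-EMAC mode the ALE is simply
 * switched to/from bypass. In switch mode, enabling promiscuity disables
 * address learning on every port, ages out the ALE table, flushes multicast
 * entries and floods all unknown unicast traffic to the host port; disabling
 * it reverses those steps.
 */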
155 static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
157 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
158 struct cpsw_ale *ale = cpsw->ale;
161 if (cpsw->data.dual_emac) {
164 /* Enabling promiscuous mode for one interface is
165 * common to both interfaces, as they share the
166 * same hardware resource.
168 for (i = 0; i < cpsw->data.slaves; i++)
169 if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
172 if (!enable && flag) {
174 dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
179 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
181 dev_dbg(&ndev->dev, "promiscuity enabled\n");
184 cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
185 dev_dbg(&ndev->dev, "promiscuity disabled\n");
189 unsigned long timeout = jiffies + HZ;
191 /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
192 for (i = 0; i <= cpsw->data.slaves; i++) {
193 cpsw_ale_control_set(ale, i,
194 ALE_PORT_NOLEARN, 1);
195 cpsw_ale_control_set(ale, i,
196 ALE_PORT_NO_SA_UPDATE, 1);
199 /* Clear All Untouched entries */
200 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
203 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
205 } while (time_after(timeout, jiffies));
206 cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
208 /* Clear all mcast from ALE */
209 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
210 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
212 /* Flood All Unicast Packets to Host port */
213 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
214 dev_dbg(&ndev->dev, "promiscuity enabled\n");
216 /* Don't Flood All Unicast Packets to Host port */
217 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
219 /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
220 for (i = 0; i <= cpsw->data.slaves; i++) {
221 cpsw_ale_control_set(ale, i,
222 ALE_PORT_NOLEARN, 0);
223 cpsw_ale_control_set(ale, i,
224 ALE_PORT_NO_SA_UPDATE, 0);
226 dev_dbg(&ndev->dev, "promiscuity disabled\n");
232 * cpsw_set_mc - adds a multicast entry to the table if it is not already
233 * present, or deletes it if it is
234 * @ndev: device to sync
235 * @addr: address to be added or deleted
236 * @vid: vlan id, if vid < 0 set/unset address for real device
237 * @add: add address if the flag is set or remove otherwise
239 static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
242 struct cpsw_priv *priv = netdev_priv(ndev);
243 struct cpsw_common *cpsw = priv->cpsw;
244 int mask, flags, ret;
247 if (cpsw->data.dual_emac)
248 vid = cpsw->slaves[priv->emac_port].port_vlan;
253 mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
254 flags = vid ? ALE_VLAN : 0;
257 ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
259 ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
264 static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
266 struct addr_sync_ctx *sync_ctx = ctx;
267 struct netdev_hw_addr *ha;
268 int found = 0, ret = 0;
270 if (!vdev || !(vdev->flags & IFF_UP))
273 /* vlan address is relevant if its sync_cnt != 0 */
274 netdev_for_each_mc_addr(ha, vdev) {
275 if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
276 found = ha->sync_cnt;
282 sync_ctx->consumed++;
284 if (sync_ctx->flush) {
286 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
291 ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
296 static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
298 struct addr_sync_ctx sync_ctx;
301 sync_ctx.consumed = 0;
302 sync_ctx.addr = addr;
303 sync_ctx.ndev = ndev;
306 ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
307 if (sync_ctx.consumed < num && !ret)
308 ret = cpsw_set_mc(ndev, addr, -1, 1);
313 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
315 struct addr_sync_ctx sync_ctx;
317 sync_ctx.consumed = 0;
318 sync_ctx.addr = addr;
319 sync_ctx.ndev = ndev;
322 vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
323 if (sync_ctx.consumed == num)
324 cpsw_set_mc(ndev, addr, -1, 0);
329 static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
331 struct addr_sync_ctx *sync_ctx = ctx;
332 struct netdev_hw_addr *ha;
335 if (!vdev || !(vdev->flags & IFF_UP))
338 /* vlan address is relevant if its sync_cnt != 0 */
339 netdev_for_each_mc_addr(ha, vdev) {
340 if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
341 found = ha->sync_cnt;
349 sync_ctx->consumed++;
350 cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
354 static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
356 struct addr_sync_ctx sync_ctx;
358 sync_ctx.addr = addr;
359 sync_ctx.ndev = ndev;
360 sync_ctx.consumed = 0;
362 vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
363 if (sync_ctx.consumed < num)
364 cpsw_set_mc(ndev, addr, -1, 0);
369 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
371 struct cpsw_priv *priv = netdev_priv(ndev);
372 struct cpsw_common *cpsw = priv->cpsw;
375 if (cpsw->data.dual_emac)
376 slave_port = priv->emac_port + 1;
378 if (ndev->flags & IFF_PROMISC) {
379 /* Enable promiscuous mode */
380 cpsw_set_promiscious(ndev, true);
381 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
384 /* Disable promiscuous mode */
385 cpsw_set_promiscious(ndev, false);
388 /* Restore allmulti on vlans if necessary */
389 cpsw_ale_set_allmulti(cpsw->ale,
390 ndev->flags & IFF_ALLMULTI, slave_port);
392 /* add/remove mcast address either for real netdev or for vlan */
393 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
397 static void cpsw_intr_enable(struct cpsw_common *cpsw)
399 writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
400 writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
402 cpdma_ctlr_int_ctrl(cpsw->dma, true);
406 static void cpsw_intr_disable(struct cpsw_common *cpsw)
408 writel_relaxed(0, &cpsw->wr_regs->tx_en);
409 writel_relaxed(0, &cpsw->wr_regs->rx_en);
411 cpdma_ctlr_int_ctrl(cpsw->dma, false);
415 static void cpsw_tx_handler(void *token, int len, int status)
417 struct netdev_queue *txq;
418 struct sk_buff *skb = token;
419 struct net_device *ndev = skb->dev;
420 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
422 /* Check whether the queue is stopped due to stalled tx dma; if it is
423 * stopped, wake it up since we now have a free desc for tx
425 txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
426 if (unlikely(netif_tx_queue_stopped(txq)))
427 netif_tx_wake_queue(txq);
429 cpts_tx_timestamp(cpsw->cpts, skb);
430 ndev->stats.tx_packets++;
431 ndev->stats.tx_bytes += len;
432 dev_kfree_skb_any(skb);
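/* The hardware prepends a 32-bit VLAN encapsulation word to received frames.
 * Pull it off, extract the packet type, VID and priority, ignore reserved,
 * priority-tagged, VID-0 and (in dual-EMAC mode) port-VLAN frames, push the
 * tag into the skb via VLAN hwaccel and strip the in-line VLAN header from
 * VLAN-tagged packets.
 */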
435 static void cpsw_rx_vlan_encap(struct sk_buff *skb)
437 struct cpsw_priv *priv = netdev_priv(skb->dev);
438 struct cpsw_common *cpsw = priv->cpsw;
439 u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
440 u16 vtag, vid, prio, pkt_type;
442 /* Remove VLAN header encapsulation word */
443 skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
445 pkt_type = (rx_vlan_encap_hdr >>
446 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
447 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
448 /* Ignore unknown & priority-tagged packets */
449 if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
450 pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
453 vid = (rx_vlan_encap_hdr >>
454 CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
456 /* Ignore vid 0 and pass packet as is */
459 /* Ignore default vlans in dual mac mode */
460 if (cpsw->data.dual_emac &&
461 vid == cpsw->slaves[priv->emac_port].port_vlan)
464 prio = (rx_vlan_encap_hdr >>
465 CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
466 CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
468 vtag = (prio << VLAN_PRIO_SHIFT) | vid;
469 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
471 /* strip vlan tag for VLAN-tagged packet */
472 if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
473 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
474 skb_pull(skb, VLAN_HLEN);
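/* RX completion handler: map the CPDMA source port back to the right ndev in
 * dual-EMAC mode, drop or requeue buffers when an interface is down, allocate
 * a replacement skb before handing the received one up the stack (with
 * optional VLAN decap and CPTS timestamping), and resubmit the new buffer to
 * the same CPDMA channel.
 */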
478 static void cpsw_rx_handler(void *token, int len, int status)
480 struct cpdma_chan *ch;
481 struct sk_buff *skb = token;
482 struct sk_buff *new_skb;
483 struct net_device *ndev = skb->dev;
485 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
486 struct cpsw_priv *priv;
488 if (cpsw->data.dual_emac) {
489 port = CPDMA_RX_SOURCE_PORT(status);
491 ndev = cpsw->slaves[--port].ndev;
496 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
497 /* In dual emac mode check for all interfaces */
498 if (cpsw->data.dual_emac && cpsw->usage_count &&
500 /* The packet was received on an interface which
501 * is already down while the other interface is up
502 * and running. Instead of freeing the skb, which
503 * would reduce the number of rx descriptors in the
504 * DMA engine, requeue it back to cpdma.
510 /* the interface is going down, skbs are purged */
511 dev_kfree_skb_any(skb);
515 new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
517 skb_copy_queue_mapping(new_skb, skb);
519 if (status & CPDMA_RX_VLAN_ENCAP)
520 cpsw_rx_vlan_encap(skb);
521 priv = netdev_priv(ndev);
522 if (priv->rx_ts_enabled)
523 cpts_rx_timestamp(cpsw->cpts, skb);
524 skb->protocol = eth_type_trans(skb, ndev);
525 netif_receive_skb(skb);
526 ndev->stats.rx_bytes += len;
527 ndev->stats.rx_packets++;
528 kmemleak_not_leak(new_skb);
530 ndev->stats.rx_dropped++;
535 if (netif_dormant(ndev)) {
536 dev_kfree_skb_any(new_skb);
540 ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
541 ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
542 skb_tailroom(new_skb), 0);
543 if (WARN_ON(ret < 0))
544 dev_kfree_skb_any(new_skb);
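/* Split the NAPI poll budget (CPSW_POLL_WEIGHT) among TX channels:
 * rate-limited channels get a share proportional to their rate against
 * max_rate, unlimited channels split the remaining budget evenly, and any
 * leftover goes to the channel with the biggest rate. The RX budget is split
 * evenly across RX channels, with the remainder added to channel 0.
 */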
547 static void cpsw_split_res(struct cpsw_common *cpsw)
549 u32 consumed_rate = 0, bigest_rate = 0;
550 struct cpsw_vector *txv = cpsw->txv;
551 int i, ch_weight, rlim_ch_num = 0;
552 int budget, bigest_rate_ch = 0;
553 u32 ch_rate, max_rate;
556 for (i = 0; i < cpsw->tx_ch_num; i++) {
557 ch_rate = cpdma_chan_get_rate(txv[i].ch);
562 consumed_rate += ch_rate;
565 if (cpsw->tx_ch_num == rlim_ch_num) {
566 max_rate = consumed_rate;
567 } else if (!rlim_ch_num) {
568 ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
570 max_rate = consumed_rate;
572 max_rate = cpsw->speed * 1000;
574 /* if max_rate is less than expected due to reduced link speed,
575 * split proportionally according to the next potential max speed
577 if (max_rate < consumed_rate)
580 if (max_rate < consumed_rate)
583 ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
584 ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
585 (cpsw->tx_ch_num - rlim_ch_num);
586 bigest_rate = (max_rate - consumed_rate) /
587 (cpsw->tx_ch_num - rlim_ch_num);
590 /* split tx weight/budget */
591 budget = CPSW_POLL_WEIGHT;
592 for (i = 0; i < cpsw->tx_ch_num; i++) {
593 ch_rate = cpdma_chan_get_rate(txv[i].ch);
595 txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
598 if (ch_rate > bigest_rate) {
600 bigest_rate = ch_rate;
603 ch_weight = (ch_rate * 100) / max_rate;
606 cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
608 txv[i].budget = ch_budget;
611 cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
614 budget -= txv[i].budget;
618 txv[bigest_rate_ch].budget += budget;
620 /* split rx budget */
621 budget = CPSW_POLL_WEIGHT;
622 ch_budget = budget / cpsw->rx_ch_num;
623 for (i = 0; i < cpsw->rx_ch_num; i++) {
624 cpsw->rxv[i].budget = ch_budget;
629 cpsw->rxv[0].budget += budget;
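/* TX/RX interrupt handlers: mask the interrupt in the wrapper registers,
 * acknowledge CPDMA with an EOI and defer the work to NAPI. On silicon with
 * the quirk_irq erratum the IRQ line itself is also disabled until the
 * corresponding poll routine re-enables it.
 */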
632 static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
634 struct cpsw_common *cpsw = dev_id;
636 writel(0, &cpsw->wr_regs->tx_en);
637 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
639 if (cpsw->quirk_irq) {
640 disable_irq_nosync(cpsw->irqs_table[1]);
641 cpsw->tx_irq_disabled = true;
644 napi_schedule(&cpsw->napi_tx);
648 static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
650 struct cpsw_common *cpsw = dev_id;
652 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
653 writel(0, &cpsw->wr_regs->rx_en);
655 if (cpsw->quirk_irq) {
656 disable_irq_nosync(cpsw->irqs_table[0]);
657 cpsw->rx_irq_disabled = true;
660 napi_schedule(&cpsw->napi_rx);
664 static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
667 int num_tx, cur_budget, ch;
668 struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
669 struct cpsw_vector *txv;
671 /* process every unprocessed channel */
672 ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
673 for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
674 if (!(ch_map & 0x80))
677 txv = &cpsw->txv[ch];
678 if (unlikely(txv->budget > budget - num_tx))
679 cur_budget = budget - num_tx;
681 cur_budget = txv->budget;
683 num_tx += cpdma_chan_process(txv->ch, cur_budget);
684 if (num_tx >= budget)
688 if (num_tx < budget) {
689 napi_complete(napi_tx);
690 writel(0xff, &cpsw->wr_regs->tx_en);
696 static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
698 struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
701 num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
702 if (num_tx < budget) {
703 napi_complete(napi_tx);
704 writel(0xff, &cpsw->wr_regs->tx_en);
705 if (cpsw->tx_irq_disabled) {
706 cpsw->tx_irq_disabled = false;
707 enable_irq(cpsw->irqs_table[1]);
714 static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
717 int num_rx, cur_budget, ch;
718 struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
719 struct cpsw_vector *rxv;
721 /* process every unprocessed channel */
722 ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
723 for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
724 if (!(ch_map & 0x01))
727 rxv = &cpsw->rxv[ch];
728 if (unlikely(rxv->budget > budget - num_rx))
729 cur_budget = budget - num_rx;
731 cur_budget = rxv->budget;
733 num_rx += cpdma_chan_process(rxv->ch, cur_budget);
734 if (num_rx >= budget)
738 if (num_rx < budget) {
739 napi_complete_done(napi_rx, num_rx);
740 writel(0xff, &cpsw->wr_regs->rx_en);
746 static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
748 struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
751 num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
752 if (num_rx < budget) {
753 napi_complete_done(napi_rx, num_rx);
754 writel(0xff, &cpsw->wr_regs->rx_en);
755 if (cpsw->rx_irq_disabled) {
756 cpsw->rx_irq_disabled = false;
757 enable_irq(cpsw->irqs_table[0]);
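/* Write the soft-reset bit and poll until the module clears it, warning if
 * the reset does not complete within one second.
 */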
764 static inline void soft_reset(const char *module, void __iomem *reg)
766 unsigned long timeout = jiffies + HZ;
768 writel_relaxed(1, reg);
771 } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
773 WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
776 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
777 struct cpsw_priv *priv)
779 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
780 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
783 static bool cpsw_shp_is_off(struct cpsw_priv *priv)
785 struct cpsw_common *cpsw = priv->cpsw;
786 struct cpsw_slave *slave;
787 u32 shift, mask, val;
789 val = readl_relaxed(&cpsw->regs->ptype);
791 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
792 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
799 static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
801 struct cpsw_common *cpsw = priv->cpsw;
802 struct cpsw_slave *slave;
803 u32 shift, mask, val;
805 val = readl_relaxed(&cpsw->regs->ptype);
807 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
808 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
809 mask = (1 << --fifo) << shift;
810 val = on ? val | mask : val & ~mask;
812 writel_relaxed(val, &cpsw->regs->ptype);
815 static void _cpsw_adjust_link(struct cpsw_slave *slave,
816 struct cpsw_priv *priv, bool *link)
818 struct phy_device *phy = slave->phy;
821 struct cpsw_common *cpsw = priv->cpsw;
826 slave_port = cpsw_get_slave_port(slave->slave_num);
829 mac_control = cpsw->data.mac_control;
831 /* enable forwarding */
832 cpsw_ale_control_set(cpsw->ale, slave_port,
833 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
835 if (phy->speed == 1000)
836 mac_control |= BIT(7); /* GIGABITEN */
838 mac_control |= BIT(0); /* FULLDUPLEXEN */
840 /* set speed_in input in case RMII mode is used in 100Mbps */
841 if (phy->speed == 100)
842 mac_control |= BIT(15);
843 /* in band mode only works in 10Mbps RGMII mode */
844 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
845 mac_control |= BIT(18); /* In Band mode */
848 mac_control |= BIT(3);
851 mac_control |= BIT(4);
855 if (priv->shp_cfg_speed &&
856 priv->shp_cfg_speed != slave->phy->speed &&
857 !cpsw_shp_is_off(priv))
859 "Speed was changed, CBS shaper speeds are changed!");
862 /* disable forwarding */
863 cpsw_ale_control_set(cpsw->ale, slave_port,
864 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
867 if (mac_control != slave->mac_control) {
868 phy_print_status(phy);
869 writel_relaxed(mac_control, &slave->sliver->mac_control);
872 slave->mac_control = mac_control;
875 static int cpsw_get_common_speed(struct cpsw_common *cpsw)
879 for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
880 if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
881 speed += cpsw->slaves[i].phy->speed;
886 static int cpsw_need_resplit(struct cpsw_common *cpsw)
891 /* re-split resources only in case speed was changed */
892 speed = cpsw_get_common_speed(cpsw);
893 if (speed == cpsw->speed || !speed)
898 for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
899 ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
906 /* cases not dependent on speed */
907 if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
913 static void cpsw_adjust_link(struct net_device *ndev)
915 struct cpsw_priv *priv = netdev_priv(ndev);
916 struct cpsw_common *cpsw = priv->cpsw;
919 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
922 if (cpsw_need_resplit(cpsw))
923 cpsw_split_res(cpsw);
925 netif_carrier_on(ndev);
926 if (netif_running(ndev))
927 netif_tx_wake_all_queues(ndev);
929 netif_carrier_off(ndev);
930 netif_tx_stop_all_queues(ndev);
934 static int cpsw_get_coalesce(struct net_device *ndev,
935 struct ethtool_coalesce *coal)
937 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
939 coal->rx_coalesce_usecs = cpsw->coal_intvl;
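/* Program interrupt pacing. The pacer runs on a 4us tick, so the prescaler is
 * bus_freq_mhz * 4; rx_imax/tx_imax hold the number of interrupts allowed per
 * millisecond, i.e. 1000 / coal_intvl. For intervals above CPSW_CMINTMAX_INTVL
 * the prescaler is dilated by an additional divider (addnl_dvdr).
 */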
943 static int cpsw_set_coalesce(struct net_device *ndev,
944 struct ethtool_coalesce *coal)
946 struct cpsw_priv *priv = netdev_priv(ndev);
948 u32 num_interrupts = 0;
952 struct cpsw_common *cpsw = priv->cpsw;
954 coal_intvl = coal->rx_coalesce_usecs;
956 int_ctrl = readl(&cpsw->wr_regs->int_control);
957 prescale = cpsw->bus_freq_mhz * 4;
959 if (!coal->rx_coalesce_usecs) {
960 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
964 if (coal_intvl < CPSW_CMINTMIN_INTVL)
965 coal_intvl = CPSW_CMINTMIN_INTVL;
967 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
968 /* Interrupt pacer works with 4us Pulse, we can
969 * throttle further by dilating the 4us pulse.
971 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
973 if (addnl_dvdr > 1) {
974 prescale *= addnl_dvdr;
975 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
976 coal_intvl = (CPSW_CMINTMAX_INTVL
980 coal_intvl = CPSW_CMINTMAX_INTVL;
984 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
985 writel(num_interrupts, &cpsw->wr_regs->rx_imax);
986 writel(num_interrupts, &cpsw->wr_regs->tx_imax);
988 int_ctrl |= CPSW_INTPACEEN;
989 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
990 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
993 writel(int_ctrl, &cpsw->wr_regs->int_control);
995 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
996 cpsw->coal_intvl = coal_intvl;
1001 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
1003 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1007 return (CPSW_STATS_COMMON_LEN +
1008 (cpsw->rx_ch_num + cpsw->tx_ch_num) *
1015 static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
1021 ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
1022 for (i = 0; i < ch_stats_len; i++) {
1023 line = i % CPSW_STATS_CH_LEN;
1024 snprintf(*p, ETH_GSTRING_LEN,
1025 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
1026 (long)(i / CPSW_STATS_CH_LEN),
1027 cpsw_gstrings_ch_stats[line].stat_string);
1028 *p += ETH_GSTRING_LEN;
1032 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1034 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1038 switch (stringset) {
1040 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
1041 memcpy(p, cpsw_gstrings_stats[i].stat_string,
1043 p += ETH_GSTRING_LEN;
1046 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
1047 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
1052 static void cpsw_get_ethtool_stats(struct net_device *ndev,
1053 struct ethtool_stats *stats, u64 *data)
1056 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1057 struct cpdma_chan_stats ch_stats;
1060 /* Collect CPSW hardware stats, then Davinci CPDMA stats for each Rx and Tx channel */
1061 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
1062 data[l] = readl(cpsw->hw_stats +
1063 cpsw_gstrings_stats[l].stat_offset);
1065 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1066 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
1067 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1068 p = (u8 *)&ch_stats +
1069 cpsw_gstrings_ch_stats[i].stat_offset;
1070 data[l] = *(u32 *)p;
1074 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1075 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
1076 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1077 p = (u8 *)&ch_stats +
1078 cpsw_gstrings_ch_stats[i].stat_offset;
1079 data[l] = *(u32 *)p;
1084 static inline void cpsw_add_dual_emac_def_ale_entries(
1085 struct cpsw_priv *priv, struct cpsw_slave *slave,
1088 struct cpsw_common *cpsw = priv->cpsw;
1089 u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
1091 if (cpsw->version == CPSW_VERSION_1)
1092 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1094 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
1095 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
1096 port_mask, port_mask, 0);
1097 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1098 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
1099 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1100 HOST_PORT_NUM, ALE_VLAN |
1101 ALE_SECURE, slave->port_vlan);
1102 cpsw_ale_control_set(cpsw->ale, slave_port,
1103 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
1106 static void soft_reset_slave(struct cpsw_slave *slave)
1110 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1111 soft_reset(name, &slave->sliver->soft_reset);
1114 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1117 struct phy_device *phy;
1118 struct cpsw_common *cpsw = priv->cpsw;
1120 soft_reset_slave(slave);
1122 /* setup priority mapping */
1123 writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
1125 switch (cpsw->version) {
1126 case CPSW_VERSION_1:
1127 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
1128 /* Increase RX FIFO size to 5 for supporting full-duplex
1132 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1133 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
1135 case CPSW_VERSION_2:
1136 case CPSW_VERSION_3:
1137 case CPSW_VERSION_4:
1138 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
1139 /* Increase RX FIFO size to 5 for supporting full-duplex
1143 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1144 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
1148 /* setup max packet size and mac address */
1149 writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
1150 cpsw_set_slave_mac(slave, priv);
1152 slave->mac_control = 0; /* no link yet */
1154 slave_port = cpsw_get_slave_port(slave->slave_num);
1156 if (cpsw->data.dual_emac)
1157 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
1159 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1160 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1162 if (slave->data->phy_node) {
1163 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1164 &cpsw_adjust_link, 0, slave->data->phy_if);
1166 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
1167 slave->data->phy_node,
1172 phy = phy_connect(priv->ndev, slave->data->phy_id,
1173 &cpsw_adjust_link, slave->data->phy_if);
1176 "phy \"%s\" not found on slave %d, err %ld\n",
1177 slave->data->phy_id, slave->slave_num,
1185 phy_attached_info(slave->phy);
1187 phy_start(slave->phy);
1189 /* Configure GMII_SEL register */
1190 if (!IS_ERR(slave->data->ifphy))
1191 phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
1192 slave->data->phy_if);
1194 cpsw_phy_sel(cpsw->dev, slave->phy->interface,
1198 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1200 struct cpsw_common *cpsw = priv->cpsw;
1201 const int vlan = cpsw->data.default_vlan;
1204 int unreg_mcast_mask;
1206 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1209 writel(vlan, &cpsw->host_port_regs->port_vlan);
1211 for (i = 0; i < cpsw->data.slaves; i++)
1212 slave_write(cpsw->slaves + i, vlan, reg);
1214 if (priv->ndev->flags & IFF_ALLMULTI)
1215 unreg_mcast_mask = ALE_ALL_PORTS;
1217 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1219 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
1220 ALE_ALL_PORTS, ALE_ALL_PORTS,
1224 static void cpsw_init_host_port(struct cpsw_priv *priv)
1228 struct cpsw_common *cpsw = priv->cpsw;
1230 /* soft reset the controller and initialize ale */
1231 soft_reset("cpsw", &cpsw->regs->soft_reset);
1232 cpsw_ale_start(cpsw->ale);
1234 /* switch to vlan aware mode */
1235 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
1236 CPSW_ALE_VLAN_AWARE);
1237 control_reg = readl(&cpsw->regs->control);
1238 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
1239 writel(control_reg, &cpsw->regs->control);
1240 fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
1241 CPSW_FIFO_NORMAL_MODE;
1242 writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
1244 /* setup host port priority mapping */
1245 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
1246 &cpsw->host_port_regs->cpdma_tx_pri_map);
1247 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
1249 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
1250 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1252 if (!cpsw->data.dual_emac) {
1253 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1255 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1256 ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
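/* Pre-fill every RX channel with as many skbs as the channel has descriptors
 * so the hardware always has buffers to receive into.
 */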
1260 static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1262 struct cpsw_common *cpsw = priv->cpsw;
1263 struct sk_buff *skb;
1267 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1268 ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1269 for (i = 0; i < ch_buf_num; i++) {
1270 skb = __netdev_alloc_skb_ip_align(priv->ndev,
1271 cpsw->rx_packet_max,
1274 cpsw_err(priv, ifup, "cannot allocate skb\n");
1278 skb_set_queue_mapping(skb, ch);
1279 ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
1280 skb->data, skb_tailroom(skb),
1283 cpsw_err(priv, ifup,
1284 "cannot submit skb to channel %d rx, error %d\n",
1289 kmemleak_not_leak(skb);
1292 cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1299 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
1303 slave_port = cpsw_get_slave_port(slave->slave_num);
1307 phy_stop(slave->phy);
1308 phy_disconnect(slave->phy);
1310 cpsw_ale_control_set(cpsw->ale, slave_port,
1311 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1312 soft_reset_slave(slave);
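/* Map a traffic class to a TX FIFO: the last (lowest priority) class uses
 * FIFO 0, which cannot be shaped; the remaining classes map to the upper,
 * rate-limited FIFOs in reverse order.
 */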
1315 static int cpsw_tc_to_fifo(int tc, int num_tc)
1317 if (tc == num_tc - 1)
1320 return CPSW_FIFO_SHAPERS_NUM - tc;
1323 static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
1325 struct cpsw_common *cpsw = priv->cpsw;
1326 u32 val = 0, send_pct, shift;
1327 struct cpsw_slave *slave;
1330 if (bw > priv->shp_cfg_speed * 1000)
1333 /* shaping has to stay enabled for the highest fifos linearly
1334 * and fifo bw can be no more than the interface allows
1336 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1337 send_pct = slave_read(slave, SEND_PERCENT);
1338 for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1340 if (i >= fifo || !priv->fifo_bw[i])
1343 dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
1347 if (!priv->fifo_bw[i] && i > fifo) {
1348 dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
1352 shift = (i - 1) * 8;
1354 send_pct &= ~(CPSW_PCT_MASK << shift);
1355 val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1359 send_pct |= val << shift;
1364 if (priv->fifo_bw[i])
1365 pct += (send_pct >> shift) & CPSW_PCT_MASK;
1371 slave_write(slave, send_pct, SEND_PERCENT);
1372 priv->fifo_bw[fifo] = bw;
1374 dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1375 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1379 dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
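/* Program the per-FIFO bandwidth, then update TX_IN_CTL: put the FIFO queues
 * into rate-limited mode and set the FIFO's rate-enable bit while a shaper is
 * active, or drop back to normal priority mode once all shapers are off. The
 * PTYPE shaper-enable bit is toggled through cpsw_fifo_shp_on().
 */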
1383 static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1385 struct cpsw_common *cpsw = priv->cpsw;
1386 struct cpsw_slave *slave;
1387 u32 tx_in_ctl_rg, val;
1390 ret = cpsw_set_fifo_bw(priv, fifo, bw);
1394 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1395 tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1396 CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1399 cpsw_fifo_shp_on(priv, fifo, bw);
1401 val = slave_read(slave, tx_in_ctl_rg);
1402 if (cpsw_shp_is_off(priv)) {
1403 /* disable FIFOs rate limited queues */
1404 val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1406 /* set type of FIFO queues to normal priority mode */
1407 val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1409 /* set type of FIFO queues to be rate limited */
1411 val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1413 priv->shp_cfg_speed = 0;
1416 /* toggle a FIFO rate limited queue */
1418 val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1420 val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1421 slave_write(slave, val, tx_in_ctl_rg);
1423 /* FIFO transmit shape enable */
1424 cpsw_fifo_shp_on(priv, fifo, bw);
1431 * shaping for class A should be set first
1433 static int cpsw_set_cbs(struct net_device *ndev,
1434 struct tc_cbs_qopt_offload *qopt)
1436 struct cpsw_priv *priv = netdev_priv(ndev);
1437 struct cpsw_common *cpsw = priv->cpsw;
1438 struct cpsw_slave *slave;
1443 tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1445 /* enable channels in backward order, as highest FIFOs must be rate
1446 * limited first and for compliance with CPDMA rate limited channels
1447 * that are also used in backward order. FIFO0 cannot be rate limited.
1449 fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1451 dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1455 /* do nothing, it's disabled anyway */
1456 if (!qopt->enable && !priv->fifo_bw[fifo])
1459 /* shapers can be set if link speed is known */
1460 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1461 if (slave->phy && slave->phy->link) {
1462 if (priv->shp_cfg_speed &&
1463 priv->shp_cfg_speed != slave->phy->speed)
1464 prev_speed = priv->shp_cfg_speed;
1466 priv->shp_cfg_speed = slave->phy->speed;
1469 if (!priv->shp_cfg_speed) {
1470 dev_err(priv->dev, "Link speed is not known");
1474 ret = pm_runtime_get_sync(cpsw->dev);
1476 pm_runtime_put_noidle(cpsw->dev);
1480 bw = qopt->enable ? qopt->idleslope : 0;
1481 ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1483 priv->shp_cfg_speed = prev_speed;
1487 if (bw && prev_speed)
1489 "Speed was changed, CBS shaper speeds are changed!");
1491 pm_runtime_put_sync(cpsw->dev);
1495 static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1499 for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1500 bw = priv->fifo_bw[fifo];
1504 cpsw_set_fifo_rlimit(priv, fifo, bw);
1508 static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1510 struct cpsw_common *cpsw = priv->cpsw;
1511 u32 tx_prio_map = 0;
1515 if (!priv->mqprio_hw)
1518 for (i = 0; i < 8; i++) {
1519 tc = netdev_get_prio_tc_map(priv->ndev, i);
1520 fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1521 tx_prio_map |= fifo << (4 * i);
1524 tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1525 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1527 slave_write(slave, tx_prio_map, tx_prio_rg);
1530 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
1532 struct cpsw_priv *priv = arg;
1537 cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
1541 /* restore resources after port reset */
1542 static void cpsw_restore(struct cpsw_priv *priv)
1544 /* restore vlan configurations */
1545 vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
1547 /* restore MQPRIO offload */
1548 for_each_slave(priv, cpsw_mqprio_resume, priv);
1550 /* restore CBS offload */
1551 for_each_slave(priv, cpsw_cbs_resume, priv);
1554 static int cpsw_ndo_open(struct net_device *ndev)
1556 struct cpsw_priv *priv = netdev_priv(ndev);
1557 struct cpsw_common *cpsw = priv->cpsw;
1561 ret = pm_runtime_get_sync(cpsw->dev);
1563 pm_runtime_put_noidle(cpsw->dev);
1567 netif_carrier_off(ndev);
1569 /* Notify the stack of the actual queue counts. */
1570 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
1572 dev_err(priv->dev, "cannot set real number of tx queues\n");
1576 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
1578 dev_err(priv->dev, "cannot set real number of rx queues\n");
1582 reg = cpsw->version;
1584 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
1585 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
1586 CPSW_RTL_VERSION(reg));
1588 /* Initialize host and slave ports */
1589 if (!cpsw->usage_count)
1590 cpsw_init_host_port(priv);
1591 for_each_slave(priv, cpsw_slave_open, priv);
1593 /* Add default VLAN */
1594 if (!cpsw->data.dual_emac)
1595 cpsw_add_default_vlan(priv);
1597 cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
1598 ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
1600 /* initialize shared resources for every ndev */
1601 if (!cpsw->usage_count) {
1602 /* disable priority elevation */
1603 writel_relaxed(0, &cpsw->regs->ptype);
1605 /* enable statistics collection on all ports */
1606 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
1608 /* Enable internal fifo flow control */
1609 writel(0x7, &cpsw->regs->flow_control);
1611 napi_enable(&cpsw->napi_rx);
1612 napi_enable(&cpsw->napi_tx);
1614 if (cpsw->tx_irq_disabled) {
1615 cpsw->tx_irq_disabled = false;
1616 enable_irq(cpsw->irqs_table[1]);
1619 if (cpsw->rx_irq_disabled) {
1620 cpsw->rx_irq_disabled = false;
1621 enable_irq(cpsw->irqs_table[0]);
1624 ret = cpsw_fill_rx_channels(priv);
1628 if (cpts_register(cpsw->cpts))
1629 dev_err(priv->dev, "error registering cpts device\n");
1635 /* Enable Interrupt pacing if configured */
1636 if (cpsw->coal_intvl != 0) {
1637 struct ethtool_coalesce coal;
1639 coal.rx_coalesce_usecs = cpsw->coal_intvl;
1640 cpsw_set_coalesce(ndev, &coal);
1643 cpdma_ctlr_start(cpsw->dma);
1644 cpsw_intr_enable(cpsw);
1645 cpsw->usage_count++;
1650 cpdma_ctlr_stop(cpsw->dma);
1651 for_each_slave(priv, cpsw_slave_stop, cpsw);
1652 pm_runtime_put_sync(cpsw->dev);
1653 netif_carrier_off(priv->ndev);
1657 static int cpsw_ndo_stop(struct net_device *ndev)
1659 struct cpsw_priv *priv = netdev_priv(ndev);
1660 struct cpsw_common *cpsw = priv->cpsw;
1662 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
1663 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
1664 netif_tx_stop_all_queues(priv->ndev);
1665 netif_carrier_off(priv->ndev);
1667 if (cpsw->usage_count <= 1) {
1668 napi_disable(&cpsw->napi_rx);
1669 napi_disable(&cpsw->napi_tx);
1670 cpts_unregister(cpsw->cpts);
1671 cpsw_intr_disable(cpsw);
1672 cpdma_ctlr_stop(cpsw->dma);
1673 cpsw_ale_stop(cpsw->ale);
1675 for_each_slave(priv, cpsw_slave_stop, cpsw);
1677 if (cpsw_need_resplit(cpsw))
1678 cpsw_split_res(cpsw);
1680 cpsw->usage_count--;
1681 pm_runtime_put_sync(cpsw->dev);
1685 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1686 struct net_device *ndev)
1688 struct cpsw_priv *priv = netdev_priv(ndev);
1689 struct cpsw_common *cpsw = priv->cpsw;
1690 struct cpts *cpts = cpsw->cpts;
1691 struct netdev_queue *txq;
1692 struct cpdma_chan *txch;
1695 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1696 cpsw_err(priv, tx_err, "packet pad failed\n");
1697 ndev->stats.tx_dropped++;
1698 return NET_XMIT_DROP;
1701 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
1702 priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
1703 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1705 q_idx = skb_get_queue_mapping(skb);
1706 if (q_idx >= cpsw->tx_ch_num)
1707 q_idx = q_idx % cpsw->tx_ch_num;
1709 txch = cpsw->txv[q_idx].ch;
1710 txq = netdev_get_tx_queue(ndev, q_idx);
1711 skb_tx_timestamp(skb);
1712 ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
1713 priv->emac_port + cpsw->data.dual_emac);
1714 if (unlikely(ret != 0)) {
1715 cpsw_err(priv, tx_err, "desc submit failed\n");
1719 /* If there are no more free tx descriptors left then we need to
1720 * tell the kernel to stop sending us tx frames.
1722 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
1723 netif_tx_stop_queue(txq);
1725 /* Barrier, so that stop_queue is visible to other cpus */
1726 smp_mb__after_atomic();
1728 if (cpdma_check_free_tx_desc(txch))
1729 netif_tx_wake_queue(txq);
1732 return NETDEV_TX_OK;
1734 ndev->stats.tx_dropped++;
1735 netif_tx_stop_queue(txq);
1737 /* Barrier, so that stop_queue is visible to other cpus */
1738 smp_mb__after_atomic();
1740 if (cpdma_check_free_tx_desc(txch))
1741 netif_tx_wake_queue(txq);
1743 return NETDEV_TX_BUSY;
1746 #if IS_ENABLED(CONFIG_TI_CPTS)
1748 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1750 struct cpsw_common *cpsw = priv->cpsw;
1751 struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
1754 if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
1755 slave_write(slave, 0, CPSW1_TS_CTL);
1759 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1760 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1762 if (priv->tx_ts_enabled)
1763 ts_en |= CPSW_V1_TS_TX_EN;
1765 if (priv->rx_ts_enabled)
1766 ts_en |= CPSW_V1_TS_RX_EN;
1768 slave_write(slave, ts_en, CPSW1_TS_CTL);
1769 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1772 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1774 struct cpsw_slave *slave;
1775 struct cpsw_common *cpsw = priv->cpsw;
1778 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1780 ctrl = slave_read(slave, CPSW2_CONTROL);
1781 switch (cpsw->version) {
1782 case CPSW_VERSION_2:
1783 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1785 if (priv->tx_ts_enabled)
1786 ctrl |= CTRL_V2_TX_TS_BITS;
1788 if (priv->rx_ts_enabled)
1789 ctrl |= CTRL_V2_RX_TS_BITS;
1791 case CPSW_VERSION_3:
1793 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1795 if (priv->tx_ts_enabled)
1796 ctrl |= CTRL_V3_TX_TS_BITS;
1798 if (priv->rx_ts_enabled)
1799 ctrl |= CTRL_V3_RX_TS_BITS;
1803 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1805 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1806 slave_write(slave, ctrl, CPSW2_CONTROL);
1807 writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
1808 writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
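/* SIOCSHWTSTAMP handler: only CPSW versions 1-3 support hardware
 * timestamping. Requested RX filters are collapsed to the two modes the
 * hardware can report, PTP v1 L4 events or PTP v2 events, and the resulting
 * config is written back to user space.
 */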
1811 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1813 struct cpsw_priv *priv = netdev_priv(dev);
1814 struct hwtstamp_config cfg;
1815 struct cpsw_common *cpsw = priv->cpsw;
1817 if (cpsw->version != CPSW_VERSION_1 &&
1818 cpsw->version != CPSW_VERSION_2 &&
1819 cpsw->version != CPSW_VERSION_3)
1822 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1825 /* reserved for future extensions */
1829 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1832 switch (cfg.rx_filter) {
1833 case HWTSTAMP_FILTER_NONE:
1834 priv->rx_ts_enabled = 0;
1836 case HWTSTAMP_FILTER_ALL:
1837 case HWTSTAMP_FILTER_NTP_ALL:
1839 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1840 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1841 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1842 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1843 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1845 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1846 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1847 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1848 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1849 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1850 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1851 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1852 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1853 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1854 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
1855 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1861 priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
1863 switch (cpsw->version) {
1864 case CPSW_VERSION_1:
1865 cpsw_hwtstamp_v1(priv);
1867 case CPSW_VERSION_2:
1868 case CPSW_VERSION_3:
1869 cpsw_hwtstamp_v2(priv);
1875 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1878 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1880 struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1881 struct cpsw_priv *priv = netdev_priv(dev);
1882 struct hwtstamp_config cfg;
1884 if (cpsw->version != CPSW_VERSION_1 &&
1885 cpsw->version != CPSW_VERSION_2 &&
1886 cpsw->version != CPSW_VERSION_3)
1890 cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1891 cfg.rx_filter = priv->rx_ts_enabled;
1893 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1896 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1901 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1905 #endif /*CONFIG_TI_CPTS*/
1907 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1909 struct cpsw_priv *priv = netdev_priv(dev);
1910 struct cpsw_common *cpsw = priv->cpsw;
1911 int slave_no = cpsw_slave_index(cpsw, priv);
1913 if (!netif_running(dev))
1918 return cpsw_hwtstamp_set(dev, req);
1920 return cpsw_hwtstamp_get(dev, req);
1923 if (!cpsw->slaves[slave_no].phy)
1925 return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
1928 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1930 struct cpsw_priv *priv = netdev_priv(ndev);
1931 struct cpsw_common *cpsw = priv->cpsw;
1934 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1935 ndev->stats.tx_errors++;
1936 cpsw_intr_disable(cpsw);
1937 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1938 cpdma_chan_stop(cpsw->txv[ch].ch);
1939 cpdma_chan_start(cpsw->txv[ch].ch);
1942 cpsw_intr_enable(cpsw);
1943 netif_trans_update(ndev);
1944 netif_tx_wake_all_queues(ndev);
1947 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1949 struct cpsw_priv *priv = netdev_priv(ndev);
1950 struct sockaddr *addr = (struct sockaddr *)p;
1951 struct cpsw_common *cpsw = priv->cpsw;
1956 if (!is_valid_ether_addr(addr->sa_data))
1957 return -EADDRNOTAVAIL;
1959 ret = pm_runtime_get_sync(cpsw->dev);
1961 pm_runtime_put_noidle(cpsw->dev);
1965 if (cpsw->data.dual_emac) {
1966 vid = cpsw->slaves[priv->emac_port].port_vlan;
1970 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1972 cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1975 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1976 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1977 for_each_slave(priv, cpsw_set_slave_mac, priv);
1979 pm_runtime_put(cpsw->dev);
1984 #ifdef CONFIG_NET_POLL_CONTROLLER
1985 static void cpsw_ndo_poll_controller(struct net_device *ndev)
1987 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1989 cpsw_intr_disable(cpsw);
1990 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
1991 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
1992 cpsw_intr_enable(cpsw);
1996 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2000 int unreg_mcast_mask = 0;
2003 struct cpsw_common *cpsw = priv->cpsw;
2005 if (cpsw->data.dual_emac) {
2006 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
2008 mcast_mask = ALE_PORT_HOST;
2009 if (priv->ndev->flags & IFF_ALLMULTI)
2010 unreg_mcast_mask = mcast_mask;
2012 port_mask = ALE_ALL_PORTS;
2013 mcast_mask = port_mask;
2015 if (priv->ndev->flags & IFF_ALLMULTI)
2016 unreg_mcast_mask = ALE_ALL_PORTS;
2018 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2021 ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
2026 ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
2027 HOST_PORT_NUM, ALE_VLAN, vid);
2031 ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
2032 mcast_mask, ALE_VLAN, vid, 0);
2034 goto clean_vlan_ucast;
2038 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2039 HOST_PORT_NUM, ALE_VLAN, vid);
2041 cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2045 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2046 __be16 proto, u16 vid)
2048 struct cpsw_priv *priv = netdev_priv(ndev);
2049 struct cpsw_common *cpsw = priv->cpsw;
2052 if (vid == cpsw->data.default_vlan)
2055 ret = pm_runtime_get_sync(cpsw->dev);
2057 pm_runtime_put_noidle(cpsw->dev);
2061 if (cpsw->data.dual_emac) {
2062 /* In dual EMAC, reserved VLAN id should not be used for
2063 * creating VLAN interfaces as this can break the dual
2064 * EMAC port separation
2068 for (i = 0; i < cpsw->data.slaves; i++) {
2069 if (vid == cpsw->slaves[i].port_vlan) {
2076 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2077 ret = cpsw_add_vlan_ale_entry(priv, vid);
2079 pm_runtime_put(cpsw->dev);
2083 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2084 __be16 proto, u16 vid)
2086 struct cpsw_priv *priv = netdev_priv(ndev);
2087 struct cpsw_common *cpsw = priv->cpsw;
2090 if (vid == cpsw->data.default_vlan)
2093 ret = pm_runtime_get_sync(cpsw->dev);
2095 pm_runtime_put_noidle(cpsw->dev);
2099 if (cpsw->data.dual_emac) {
2102 for (i = 0; i < cpsw->data.slaves; i++) {
2103 if (vid == cpsw->slaves[i].port_vlan)
2108 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
2109 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2110 ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2111 HOST_PORT_NUM, ALE_VLAN, vid);
2112 ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2114 ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
2116 pm_runtime_put(cpsw->dev);
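/* ndo_set_tx_maxrate: the rate from the stack is in Mbps. Convert it to kbps
 * for CPDMA, validate it against the minimum CPDMA rate and the current link
 * speed, program the channel, mirror the value to the same queue on every
 * slave net_device, and re-split the NAPI budgets.
 */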
2120 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
2122 struct cpsw_priv *priv = netdev_priv(ndev);
2123 struct cpsw_common *cpsw = priv->cpsw;
2124 struct cpsw_slave *slave;
2129 ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
2130 if (ch_rate == rate)
2133 ch_rate = rate * 1000;
2134 min_rate = cpdma_chan_get_min_rate(cpsw->dma);
2135 if ((ch_rate < min_rate && ch_rate)) {
2136 dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
2141 if (rate > cpsw->speed) {
2142 dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
2146 ret = pm_runtime_get_sync(cpsw->dev);
2148 pm_runtime_put_noidle(cpsw->dev);
2152 ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
2153 pm_runtime_put(cpsw->dev);
2158 /* update rates for the slaves' tx queues */
2159 for (i = 0; i < cpsw->data.slaves; i++) {
2160 slave = &cpsw->slaves[i];
2164 netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
2167 cpsw_split_res(cpsw);
2171 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
2173 struct tc_mqprio_qopt_offload *mqprio = type_data;
2174 struct cpsw_priv *priv = netdev_priv(ndev);
2175 struct cpsw_common *cpsw = priv->cpsw;
2176 int fifo, num_tc, count, offset;
2177 struct cpsw_slave *slave;
2178 u32 tx_prio_map = 0;
2181 num_tc = mqprio->qopt.num_tc;
2182 if (num_tc > CPSW_TC_NUM)
2185 if (mqprio->mode != TC_MQPRIO_MODE_DCB)
2188 ret = pm_runtime_get_sync(cpsw->dev);
2190 pm_runtime_put_noidle(cpsw->dev);
2195 for (i = 0; i < 8; i++) {
2196 tc = mqprio->qopt.prio_tc_map[i];
2197 fifo = cpsw_tc_to_fifo(tc, num_tc);
2198 tx_prio_map |= fifo << (4 * i);
2201 netdev_set_num_tc(ndev, num_tc);
2202 for (i = 0; i < num_tc; i++) {
2203 count = mqprio->qopt.count[i];
2204 offset = mqprio->qopt.offset[i];
2205 netdev_set_tc_queue(ndev, i, count, offset);
2209 if (!mqprio->qopt.hw) {
2210 /* restore default configuration */
2211 netdev_reset_tc(ndev);
2212 tx_prio_map = TX_PRIORITY_MAPPING;
2215 priv->mqprio_hw = mqprio->qopt.hw;
2217 offset = cpsw->version == CPSW_VERSION_1 ?
2218 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
2220 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2221 slave_write(slave, tx_prio_map, offset);
2223 pm_runtime_put_sync(cpsw->dev);
2228 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2232 case TC_SETUP_QDISC_CBS:
2233 return cpsw_set_cbs(ndev, type_data);
2235 case TC_SETUP_QDISC_MQPRIO:
2236 return cpsw_set_mqprio(ndev, type_data);
2243 static const struct net_device_ops cpsw_netdev_ops = {
2244 .ndo_open = cpsw_ndo_open,
2245 .ndo_stop = cpsw_ndo_stop,
2246 .ndo_start_xmit = cpsw_ndo_start_xmit,
2247 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
2248 .ndo_do_ioctl = cpsw_ndo_ioctl,
2249 .ndo_validate_addr = eth_validate_addr,
2250 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
2251 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
2252 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
2253 #ifdef CONFIG_NET_POLL_CONTROLLER
2254 .ndo_poll_controller = cpsw_ndo_poll_controller,
2256 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
2257 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
2258 .ndo_setup_tc = cpsw_ndo_setup_tc,
2261 static int cpsw_get_regs_len(struct net_device *ndev)
2263 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2265 return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
2268 static void cpsw_get_regs(struct net_device *ndev,
2269 struct ethtool_regs *regs, void *p)
2272 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2274 /* update CPSW IP version */
2275 regs->version = cpsw->version;
2277 cpsw_ale_dump(cpsw->ale, reg);
2280 static void cpsw_get_drvinfo(struct net_device *ndev,
2281 struct ethtool_drvinfo *info)
2283 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2284 struct platform_device *pdev = to_platform_device(cpsw->dev);
2286 strlcpy(info->driver, "cpsw", sizeof(info->driver));
2287 strlcpy(info->version, "1.0", sizeof(info->version));
2288 strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2291 static u32 cpsw_get_msglevel(struct net_device *ndev)
2293 struct cpsw_priv *priv = netdev_priv(ndev);
2294 return priv->msg_enable;
2297 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
2299 struct cpsw_priv *priv = netdev_priv(ndev);
2300 priv->msg_enable = value;
2303 #if IS_ENABLED(CONFIG_TI_CPTS)
2304 static int cpsw_get_ts_info(struct net_device *ndev,
2305 struct ethtool_ts_info *info)
2307 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2309 info->so_timestamping =
2310 SOF_TIMESTAMPING_TX_HARDWARE |
2311 SOF_TIMESTAMPING_TX_SOFTWARE |
2312 SOF_TIMESTAMPING_RX_HARDWARE |
2313 SOF_TIMESTAMPING_RX_SOFTWARE |
2314 SOF_TIMESTAMPING_SOFTWARE |
2315 SOF_TIMESTAMPING_RAW_HARDWARE;
2316 info->phc_index = cpsw->cpts->phc_index;
2318 (1 << HWTSTAMP_TX_OFF) |
2319 (1 << HWTSTAMP_TX_ON);
2321 (1 << HWTSTAMP_FILTER_NONE) |
2322 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2323 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2327 static int cpsw_get_ts_info(struct net_device *ndev,
2328 struct ethtool_ts_info *info)
2330 info->so_timestamping =
2331 SOF_TIMESTAMPING_TX_SOFTWARE |
2332 SOF_TIMESTAMPING_RX_SOFTWARE |
2333 SOF_TIMESTAMPING_SOFTWARE;
2334 info->phc_index = -1;
2336 info->rx_filters = 0;
2341 static int cpsw_get_link_ksettings(struct net_device *ndev,
2342 struct ethtool_link_ksettings *ecmd)
2344 struct cpsw_priv *priv = netdev_priv(ndev);
2345 struct cpsw_common *cpsw = priv->cpsw;
2346 int slave_no = cpsw_slave_index(cpsw, priv);
2348 if (!cpsw->slaves[slave_no].phy)
2351 phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
2355 static int cpsw_set_link_ksettings(struct net_device *ndev,
2356 const struct ethtool_link_ksettings *ecmd)
2358 struct cpsw_priv *priv = netdev_priv(ndev);
2359 struct cpsw_common *cpsw = priv->cpsw;
2360 int slave_no = cpsw_slave_index(cpsw, priv);
2362 if (cpsw->slaves[slave_no].phy)
2363 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

static void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

static void cpsw_get_channels(struct net_device *ndev,
			      struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}
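
/* Channel count changes: create or destroy CPDMA channels until the count
 * in the requested direction matches. RX channels are numbered from 0 up,
 * TX channels from the top of the 8-channel range (7 - ch) down.
 */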
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static int cpsw_update_channels(struct cpsw_priv *priv,
				struct ethtool_channels *ch)
{
	int ret;

	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
	if (ret)
		return ret;

	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
	if (ret)
		return ret;

	return 0;
}
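
/* cpsw_suspend_data_pass()/cpsw_resume_data_pass() bracket reconfiguration
 * that cannot be done while CPDMA is active (channel count or descriptor
 * ring changes): stop traffic, apply the change, refill RX and restart.
 */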
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}
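
/* Entry point for "ethtool -L <iface> rx N tx M": validate the request,
 * quiesce traffic, recreate the CPDMA channels and report the new queue
 * counts to the stack for every slave netdev.
 */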
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	if (cpsw->usage_count)
		cpsw_split_res(cpsw);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}

static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}
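
/* "Ring" size here is the share of the shared CPDMA descriptor pool given
 * to RX: only rx_pending is adjustable, while the TX side is reported
 * read-only (tx_max_pending = 0) and follows the remaining pool split.
 */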
static void cpsw_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* not supported */
	ering->tx_max_pending = 0;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

static int cpsw_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
		return 0;

	cpsw_suspend_data_pass(ndev);

	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);

	if (cpsw->usage_count)
		cpdma_chan_split_pool(cpsw->dma);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;

	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
	dev_close(ndev);

	return ret;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= cpsw_get_ts_info,
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol	= cpsw_get_wol,
	.set_wol	= cpsw_set_wol,
	.get_regs_len	= cpsw_get_regs_len,
	.get_regs	= cpsw_get_regs,
	.begin		= cpsw_ethtool_op_begin,
	.complete	= cpsw_ethtool_op_complete,
	.get_channels	= cpsw_get_channels,
	.set_channels	= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee	= cpsw_get_eee,
	.set_eee	= cpsw_set_eee,
	.nway_reset	= cpsw_nway_reset,
	.get_ringparam	= cpsw_get_ringparam,
	.set_ringparam	= cpsw_set_ringparam,
};
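
/* Device tree parsing: switch-wide properties first, then one "slave"
 * child node per external port (PHY connection, MAC address and, for
 * dual_emac, the reserved port VLAN).
 */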
2759 static int cpsw_probe_dt(struct cpsw_platform_data *data,
2760 struct platform_device *pdev)
2762 struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;
	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;
2813 if (of_property_read_bool(node, "dual_emac"))
2814 data->dual_emac = 1;
	/*
	 * Populate all the child nodes here...
	 */
2819 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");
2824 for_each_available_child_of_node(node, slave_node) {
2825 struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp = 0;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;
		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			return ret;
		}
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
2896 mac_addr = of_get_mac_address(slave_node);
2898 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2900 ret = ti_cm_get_macid(&pdev->dev, i,
2901 slave_data->mac_addr);
2905 if (data->dual_emac) {
2906 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2908 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
2909 slave_data->dual_emac_res_vlan = i+1;
2910 dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
2911 slave_data->dual_emac_res_vlan, i);
2913 slave_data->dual_emac_res_vlan = prop;
2918 if (i == data->slaves)
2925 static void cpsw_remove_dt(struct platform_device *pdev)
2927 struct net_device *ndev = platform_get_drvdata(pdev);
2928 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2929 struct cpsw_platform_data *data = &cpsw->data;
2930 struct device_node *node = pdev->dev.of_node;
2931 struct device_node *slave_node;
2934 for_each_available_child_of_node(node, slave_node) {
2935 struct cpsw_slave_data *slave_data = &data->slave_data[i];
2937 if (!of_node_name_eq(slave_node, "slave"))
2940 if (of_phy_is_fixed_link(slave_node))
2941 of_phy_deregister_fixed_link(slave_node);
2943 of_node_put(slave_data->phy_node);
2946 if (i == data->slaves)
2950 of_platform_depopulate(&pdev->dev);
2953 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2955 struct cpsw_common *cpsw = priv->cpsw;
2956 struct cpsw_platform_data *data = &cpsw->data;
2957 struct net_device *ndev;
2958 struct cpsw_priv *priv_sl2;
2961 ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
2962 CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2964 dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2968 priv_sl2 = netdev_priv(ndev);
2969 priv_sl2->cpsw = cpsw;
2970 priv_sl2->ndev = ndev;
2971 priv_sl2->dev = &ndev->dev;
2972 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2974 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2975 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2977 dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
2978 priv_sl2->mac_addr);
2980 eth_random_addr(priv_sl2->mac_addr);
2981 dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
2982 priv_sl2->mac_addr);
2984 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2986 priv_sl2->emac_port = 1;
2987 cpsw->slaves[1].ndev = ndev;
2988 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2990 ndev->netdev_ops = &cpsw_netdev_ops;
2991 ndev->ethtool_ops = &cpsw_ethtool_ops;
2993 /* register the network device */
2994 SET_NETDEV_DEV(ndev, cpsw->dev);
2995 ret = register_netdev(ndev);
2997 dev_err(cpsw->dev, "cpsw: error registering net device\n");
3002 static const struct of_device_id cpsw_of_mtable[] = {
3003 { .compatible = "ti,cpsw"},
3004 { .compatible = "ti,am335x-cpsw"},
3005 { .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
3009 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
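
/* SoCs matched here get cpsw->quirk_irq set in probe, which restricts the
 * driver to a single RX and a single TX queue and selects the
 * non-multiqueue NAPI handlers.
 */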
3011 static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};
3016 static int cpsw_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct clk *clk;
3020 struct cpsw_platform_data *data;
3021 struct net_device *ndev;
3022 struct cpsw_priv *priv;
3023 struct cpdma_params dma_params;
3024 struct cpsw_ale_params ale_params;
3025 void __iomem *ss_regs;
3026 void __iomem *cpts_regs;
3027 struct resource *res, *ss_res;
3028 struct gpio_descs *mode;
3029 u32 slave_offset, sliver_offset, slave_size;
3030 const struct soc_device_attribute *soc;
	struct cpsw_common *cpsw;
	int ret = 0, i, ch;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw->dev = dev;
	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
3056 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3057 ss_regs = devm_ioremap_resource(dev, ss_res);
3058 if (IS_ERR(ss_regs))
3059 return PTR_ERR(ss_regs);
3060 cpsw->regs = ss_regs;
3062 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3063 cpsw->wr_regs = devm_ioremap_resource(dev, res);
3064 if (IS_ERR(cpsw->wr_regs))
3065 return PTR_ERR(cpsw->wr_regs);
	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;
3080 * This may be required here for child devices.
3082 pm_runtime_enable(dev);
3084 /* Need to enable clocks with runtime PM api to access module
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto clean_runtime_disable_ret;
	}
	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	data = &cpsw->data;
	cpsw->slaves = devm_kcalloc(dev,
				    data->slaves, sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}
3110 cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
3112 cpsw->rx_ch_num = 1;
3113 cpsw->tx_ch_num = 1;
3115 cpsw->version = readl(&cpsw->regs->id_ver);
3117 memset(&dma_params, 0, sizeof(dma_params));
3118 memset(&ale_params, 0, sizeof(ale_params));
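
	/* Register layout differs between CPSW v1 and v2/v3/v4: the host
	 * port, CPTS, CPDMA, ALE and per-slave blocks sit at different
	 * offsets, and v1 leaves desc_mem_phys at 0 while v2+ point it at
	 * the BD RAM inside the switch subsystem resource.
	 */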
3120 switch (cpsw->version) {
3121 case CPSW_VERSION_1:
3122 cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
3123 cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
3124 cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
3125 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
3126 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
3127 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
3128 slave_offset = CPSW1_SLAVE_OFFSET;
3129 slave_size = CPSW1_SLAVE_SIZE;
3130 sliver_offset = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
3133 case CPSW_VERSION_2:
3134 case CPSW_VERSION_3:
3135 case CPSW_VERSION_4:
3136 cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
3137 cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
3138 cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
3139 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
3140 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
3141 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
3142 slave_offset = CPSW2_SLAVE_OFFSET;
3143 slave_size = CPSW2_SLAVE_SIZE;
3144 sliver_offset = CPSW2_SLIVER_OFFSET;
3145 dma_params.desc_mem_phys =
			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		ret = -ENODEV;
		goto clean_dt_ret;
	}
3154 for (i = 0; i < cpsw->data.slaves; i++) {
3155 struct cpsw_slave *slave = &cpsw->slaves[i];
3156 void __iomem *regs = cpsw->regs;
3158 slave->slave_num = i;
3159 slave->data = &cpsw->data.slave_data[i];
3160 slave->regs = regs + slave_offset;
3161 slave->sliver = regs + sliver_offset;
3162 slave->port_vlan = slave->data->dual_emac_res_vlan;
3164 slave_offset += slave_size;
		sliver_offset += SLIVER_SIZE;
	}
3168 ale_params.dev = dev;
3169 ale_params.ale_ageout = ale_ageout;
3170 ale_params.ale_entries = data->ale_entries;
3171 ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
	cpsw->ale = cpsw_ale_create(&ale_params);
	if (!cpsw->ale) {
		dev_err(dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dt_ret;
	}
3180 dma_params.dev = dev;
3181 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
3182 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
3183 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
3184 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
3185 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
3187 dma_params.num_chan = data->channels;
3188 dma_params.has_soft_reset = true;
3189 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
3190 dma_params.desc_mem_size = data->bd_ram_size;
3191 dma_params.desc_align = 16;
3192 dma_params.has_ext_regs = true;
3193 dma_params.desc_hw_addr = dma_params.desc_mem_phys;
3194 dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
3195 dma_params.descs_pool_size = descs_pool_size;
	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_dt_ret;
	}
3204 cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
3205 if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		goto clean_dma_ret;
	}
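
	/* Create the default TX channel (CPDMA channel 7, or 0 when
	 * quirk_irq forces single-queue mode) and the default RX channel;
	 * more can be added later through cpsw_set_channels().
	 */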
3210 ch = cpsw->quirk_irq ? 0 : 7;
3211 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
3212 if (IS_ERR(cpsw->txv[0].ch)) {
3213 dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}
3218 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
3219 if (IS_ERR(cpsw->rxv[0].ch)) {
3220 dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
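
	/* Rebalance per-channel resources now that the initial channels
	 * exist; cpsw_set_channels() does the same whenever the channel
	 * count changes at runtime.
	 */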
3224 cpsw_split_res(cpsw);
3227 ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(dev, "error allocating net_device\n");
		ret = -ENOMEM;
		goto clean_cpts;
	}
3234 platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->cpsw = cpsw;
	priv->ndev = ndev;
	priv->dev  = dev;
3239 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
3240 priv->emac_port = 0;
3242 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
3243 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
3244 dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
3246 eth_random_addr(priv->mac_addr);
3247 dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
3250 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
3252 cpsw->slaves[0].ndev = ndev;
3254 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
3256 ndev->netdev_ops = &cpsw_netdev_ops;
3257 ndev->ethtool_ops = &cpsw_ethtool_ops;
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);
3265 /* register the network device */
3266 SET_NETDEV_DEV(ndev, dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_cpts;
	}
3274 if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}
3282 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3283 * MISC IRQs which are always kept disabled with this driver so
3284 * we will not request them.
3286 * If anyone wants to implement support for those, make sure to
3287 * first request and append them to irqs_table array.
	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}
3304 cpsw_notice(priv, probe,
3305 "initialized device (regs %pa, irq %d, pool size %d)\n",
3306 &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_cpts:
	cpts_release(cpsw->cpts);
clean_dma_ret:
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
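
/* System sleep: suspend closes any running interfaces and lets pinctrl
 * select the sleep pin state; resume restores the default pin state and
 * reopens the interfaces.
 */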
3350 #ifdef CONFIG_PM_SLEEP
3351 static int cpsw_suspend(struct device *dev)
3353 struct net_device *ndev = dev_get_drvdata(dev);
3354 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3356 if (cpsw->data.dual_emac) {
3359 for (i = 0; i < cpsw->data.slaves; i++) {
3360 if (netif_running(cpsw->slaves[i].ndev))
3361 cpsw_ndo_stop(cpsw->slaves[i].ndev);
3364 if (netif_running(ndev))
3365 cpsw_ndo_stop(ndev);
3368 /* Select sleep pin state */
3369 pinctrl_pm_select_sleep_state(dev);
3374 static int cpsw_resume(struct device *dev)
3376 struct net_device *ndev = dev_get_drvdata(dev);
3377 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3379 /* Select default pin state */
3380 pinctrl_pm_select_default_state(dev);
3382 /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
3384 if (cpsw->data.dual_emac) {
3387 for (i = 0; i < cpsw->data.slaves; i++) {
3388 if (netif_running(cpsw->slaves[i].ndev))
3389 cpsw_ndo_open(cpsw->slaves[i].ndev);
3392 if (netif_running(ndev))
3393 cpsw_ndo_open(ndev);
3401 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};
3413 module_platform_driver(cpsw_driver);
3415 MODULE_LICENSE("GPL");
3416 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
3417 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
3418 MODULE_DESCRIPTION("TI CPSW Ethernet driver");