/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */
15 #include <linux/if_ether.h>
16 #include <linux/if_vlan.h>
17 #include <linux/interrupt.h>
19 #include <linux/module.h>
21 #include <linux/of_net.h>
22 #include <linux/of_device.h>
23 #include <linux/phy.h>
24 #include <linux/platform_device.h>
28 #include "emac-sgmii.h"
30 #define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
31 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
33 #define EMAC_RRD_SIZE 4
34 /* The RRD size if timestamping is enabled: */
35 #define EMAC_TS_RRD_SIZE 6
36 #define EMAC_TPD_SIZE 4
37 #define EMAC_RFD_SIZE 2
39 #define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0
40 #define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22
41 #define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0
42 #define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24
44 #define RXQ0_NUM_RFD_PREF_DEF 8
45 #define TXQ0_NUM_TPD_PREF_DEF 5
47 #define EMAC_PREAMBLE_DEF 7
49 #define DMAR_DLY_CNT_DEF 15
50 #define DMAW_DLY_CNT_DEF 4
52 #define IMR_NORMAL_MASK (\
58 #define IMR_EXTENDED_MASK (\
72 #define ISR_GPHY_LINK (\
/* Clock consumer names, in sync with enum emac_clk_id.
 * NOTE(review): the trailing "rx_clk"/"sys_clk" entries are reconstructed
 * from the EMAC_CLK_RX/EMAC_CLK_SYS usage below — confirm against
 * the emac_clk_id enum in the header.
 */
static const char * const emac_clk_name[] = {
	"axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk",
	"rx_clk", "sys_clk"
};
96 void emac_reg_update32(void __iomem *addr, u32 mask, u32 val)
98 u32 data = readl(addr);
100 writel(((data & ~mask) | val), addr);
104 int emac_reinit_locked(struct emac_adapter *adpt)
108 mutex_lock(&adpt->reset_lock);
111 emac_sgmii_reset(adpt);
112 ret = emac_mac_up(adpt);
114 mutex_unlock(&adpt->reset_lock);
120 static int emac_napi_rtx(struct napi_struct *napi, int budget)
122 struct emac_rx_queue *rx_q =
123 container_of(napi, struct emac_rx_queue, napi);
124 struct emac_adapter *adpt = netdev_priv(rx_q->netdev);
125 struct emac_irq *irq = rx_q->irq;
128 emac_mac_rx_process(adpt, rx_q, &work_done, budget);
130 if (work_done < budget) {
133 irq->mask |= rx_q->intr;
134 writel(irq->mask, adpt->base + EMAC_INT_MASK);
140 /* Transmit the packet */
141 static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
143 struct emac_adapter *adpt = netdev_priv(netdev);
145 return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
148 irqreturn_t emac_isr(int _irq, void *data)
150 struct emac_irq *irq = data;
151 struct emac_adapter *adpt =
152 container_of(irq, struct emac_adapter, irq);
153 struct emac_rx_queue *rx_q = &adpt->rx_q;
156 /* disable the interrupt */
157 writel(0, adpt->base + EMAC_INT_MASK);
159 isr = readl_relaxed(adpt->base + EMAC_INT_STATUS);
161 status = isr & irq->mask;
165 if (status & ISR_ERROR) {
166 netif_warn(adpt, intr, adpt->netdev,
167 "warning: error irq status 0x%lx\n",
170 schedule_work(&adpt->work_thread);
173 /* Schedule the napi for receive queue with interrupt
176 if (status & rx_q->intr) {
177 if (napi_schedule_prep(&rx_q->napi)) {
178 irq->mask &= ~rx_q->intr;
179 __napi_schedule(&rx_q->napi);
183 if (status & TX_PKT_INT)
184 emac_mac_tx_process(adpt, &adpt->tx_q);
186 if (status & ISR_OVER)
187 net_warn_ratelimited("warning: TX/RX overflow\n");
190 if (status & ISR_GPHY_LINK)
191 phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
194 /* enable the interrupt */
195 writel(irq->mask, adpt->base + EMAC_INT_MASK);
200 /* Configure VLAN tag strip/insert feature */
201 static int emac_set_features(struct net_device *netdev,
202 netdev_features_t features)
204 netdev_features_t changed = features ^ netdev->features;
205 struct emac_adapter *adpt = netdev_priv(netdev);
207 /* We only need to reprogram the hardware if the VLAN tag features
208 * have changed, and if it's already running.
210 if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)))
213 if (!netif_running(netdev))
216 /* emac_mac_mode_config() uses netdev->features to configure the EMAC,
217 * so make sure it's set first.
219 netdev->features = features;
221 return emac_reinit_locked(adpt);
224 /* Configure Multicast and Promiscuous modes */
225 static void emac_rx_mode_set(struct net_device *netdev)
227 struct emac_adapter *adpt = netdev_priv(netdev);
228 struct netdev_hw_addr *ha;
230 emac_mac_mode_config(adpt);
232 /* update multicast address filtering */
233 emac_mac_multicast_addr_clear(adpt);
234 netdev_for_each_mc_addr(ha, netdev)
235 emac_mac_multicast_addr_set(adpt, ha->addr);
238 /* Change the Maximum Transfer Unit (MTU) */
239 static int emac_change_mtu(struct net_device *netdev, int new_mtu)
241 unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
242 struct emac_adapter *adpt = netdev_priv(netdev);
244 if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) ||
245 (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) {
246 netdev_err(adpt->netdev, "error: invalid MTU setting\n");
250 netif_info(adpt, hw, adpt->netdev,
251 "changing MTU from %d to %d\n", netdev->mtu,
253 netdev->mtu = new_mtu;
255 if (netif_running(netdev))
256 return emac_reinit_locked(adpt);
261 /* Called when the network interface is made active */
262 static int emac_open(struct net_device *netdev)
264 struct emac_adapter *adpt = netdev_priv(netdev);
267 /* allocate rx/tx dma buffer & descriptors */
268 ret = emac_mac_rx_tx_rings_alloc_all(adpt);
270 netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
274 ret = emac_mac_up(adpt);
276 emac_mac_rx_tx_rings_free_all(adpt);
280 emac_mac_start(adpt);
285 /* Called when the network interface is disabled */
286 static int emac_close(struct net_device *netdev)
288 struct emac_adapter *adpt = netdev_priv(netdev);
290 mutex_lock(&adpt->reset_lock);
293 emac_mac_rx_tx_rings_free_all(adpt);
295 mutex_unlock(&adpt->reset_lock);
300 /* Respond to a TX hang */
301 static void emac_tx_timeout(struct net_device *netdev)
303 struct emac_adapter *adpt = netdev_priv(netdev);
305 schedule_work(&adpt->work_thread);
308 /* IOCTL support for the interface */
309 static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
311 if (!netif_running(netdev))
317 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
320 /* Provide network statistics info for the interface */
321 static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
322 struct rtnl_link_stats64 *net_stats)
324 struct emac_adapter *adpt = netdev_priv(netdev);
325 unsigned int addr = REG_MAC_RX_STATUS_BIN;
326 struct emac_stats *stats = &adpt->stats;
327 u64 *stats_itr = &adpt->stats.rx_ok;
330 spin_lock(&stats->lock);
332 while (addr <= REG_MAC_RX_STATUS_END) {
333 val = readl_relaxed(adpt->base + addr);
339 /* additional rx status */
340 val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
341 adpt->stats.rx_crc_align += val;
342 val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
343 adpt->stats.rx_jabbers += val;
345 /* update tx status */
346 addr = REG_MAC_TX_STATUS_BIN;
347 stats_itr = &adpt->stats.tx_ok;
349 while (addr <= REG_MAC_TX_STATUS_END) {
350 val = readl_relaxed(adpt->base + addr);
356 /* additional tx status */
357 val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
358 adpt->stats.tx_col += val;
360 /* return parsed statistics */
361 net_stats->rx_packets = stats->rx_ok;
362 net_stats->tx_packets = stats->tx_ok;
363 net_stats->rx_bytes = stats->rx_byte_cnt;
364 net_stats->tx_bytes = stats->tx_byte_cnt;
365 net_stats->multicast = stats->rx_mcast;
366 net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 +
367 stats->tx_late_col + stats->tx_abort_col;
369 net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err +
370 stats->rx_len_err + stats->rx_sz_ov +
372 net_stats->rx_fifo_errors = stats->rx_rxf_ov;
373 net_stats->rx_length_errors = stats->rx_len_err;
374 net_stats->rx_crc_errors = stats->rx_fcs_err;
375 net_stats->rx_frame_errors = stats->rx_align_err;
376 net_stats->rx_over_errors = stats->rx_rxf_ov;
377 net_stats->rx_missed_errors = stats->rx_rxf_ov;
379 net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col +
380 stats->tx_underrun + stats->tx_trunc;
381 net_stats->tx_fifo_errors = stats->tx_underrun;
382 net_stats->tx_aborted_errors = stats->tx_abort_col;
383 net_stats->tx_window_errors = stats->tx_late_col;
385 spin_unlock(&stats->lock);
390 static const struct net_device_ops emac_netdev_ops = {
391 .ndo_open = emac_open,
392 .ndo_stop = emac_close,
393 .ndo_validate_addr = eth_validate_addr,
394 .ndo_start_xmit = emac_start_xmit,
395 .ndo_set_mac_address = eth_mac_addr,
396 .ndo_change_mtu = emac_change_mtu,
397 .ndo_do_ioctl = emac_ioctl,
398 .ndo_tx_timeout = emac_tx_timeout,
399 .ndo_get_stats64 = emac_get_stats64,
400 .ndo_set_features = emac_set_features,
401 .ndo_set_rx_mode = emac_rx_mode_set,
404 /* Watchdog task routine, called to reinitialize the EMAC */
405 static void emac_work_thread(struct work_struct *work)
407 struct emac_adapter *adpt =
408 container_of(work, struct emac_adapter, work_thread);
410 emac_reinit_locked(adpt);
413 /* Initialize various data structures */
414 static void emac_init_adapter(struct emac_adapter *adpt)
419 adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
420 adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
423 adpt->dma_order = emac_dma_ord_out;
424 adpt->dmar_block = emac_dma_req_4096;
425 adpt->dmaw_block = emac_dma_req_128;
426 adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF;
427 adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF;
428 adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF;
429 adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF;
432 reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) |
433 ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT);
437 adpt->preamble = EMAC_PREAMBLE_DEF;
441 static int emac_clks_get(struct platform_device *pdev,
442 struct emac_adapter *adpt)
446 for (i = 0; i < EMAC_CLK_CNT; i++) {
447 struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]);
451 "could not claim clock %s (error=%li)\n",
452 emac_clk_name[i], PTR_ERR(clk));
463 /* Initialize clocks */
464 static int emac_clks_phase1_init(struct platform_device *pdev,
465 struct emac_adapter *adpt)
469 ret = emac_clks_get(pdev, adpt);
473 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]);
477 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
481 ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
485 return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
488 /* Enable clocks; needs emac_clks_phase1_init to be called before */
489 static int emac_clks_phase2_init(struct platform_device *pdev,
490 struct emac_adapter *adpt)
494 ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000);
498 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]);
502 ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000);
506 ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000);
510 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]);
514 ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]);
518 return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]);
521 static void emac_clks_teardown(struct emac_adapter *adpt)
526 for (i = 0; i < EMAC_CLK_CNT; i++)
527 clk_disable_unprepare(adpt->clk[i]);
530 /* Get the resources */
531 static int emac_probe_resources(struct platform_device *pdev,
532 struct emac_adapter *adpt)
534 struct device_node *node = pdev->dev.of_node;
535 struct net_device *netdev = adpt->netdev;
536 struct resource *res;
540 /* get mac address */
541 maddr = of_get_mac_address(node);
543 eth_hw_addr_random(netdev);
545 ether_addr_copy(netdev->dev_addr, maddr);
547 /* Core 0 interrupt */
548 ret = platform_get_irq(pdev, 0);
551 "error: missing core0 irq resource (error=%i)\n", ret);
556 /* base register address */
557 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
558 adpt->base = devm_ioremap_resource(&pdev->dev, res);
559 if (IS_ERR(adpt->base))
560 return PTR_ERR(adpt->base);
562 /* CSR register address */
563 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
564 adpt->csr = devm_ioremap_resource(&pdev->dev, res);
565 if (IS_ERR(adpt->csr))
566 return PTR_ERR(adpt->csr);
568 netdev->base_addr = (unsigned long)adpt->base;
573 static const struct of_device_id emac_dt_match[] = {
575 .compatible = "qcom,fsm9900-emac",
580 static int emac_probe(struct platform_device *pdev)
582 struct net_device *netdev;
583 struct emac_adapter *adpt;
584 struct emac_phy *phy;
589 /* The EMAC itself is capable of 64-bit DMA, so try that first. */
590 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
592 /* Some platforms may restrict the EMAC's address bus to less
593 * then the size of DDR. In this case, we need to try a
594 * smaller mask. We could try every possible smaller mask,
595 * but that's overkill. Instead, just fall to 32-bit, which
596 * should always work.
598 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
600 dev_err(&pdev->dev, "could not set DMA mask\n");
605 netdev = alloc_etherdev(sizeof(struct emac_adapter));
609 dev_set_drvdata(&pdev->dev, netdev);
610 SET_NETDEV_DEV(netdev, &pdev->dev);
612 adpt = netdev_priv(netdev);
613 adpt->netdev = netdev;
614 adpt->msg_enable = EMAC_MSG_DEFAULT;
618 mutex_init(&adpt->reset_lock);
619 spin_lock_init(&adpt->stats.lock);
621 adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK;
623 ret = emac_probe_resources(pdev, adpt);
625 goto err_undo_netdev;
627 /* initialize clocks */
628 ret = emac_clks_phase1_init(pdev, adpt);
630 dev_err(&pdev->dev, "could not initialize clocks\n");
631 goto err_undo_netdev;
634 netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
635 netdev->irq = adpt->irq.irq;
637 adpt->rrd_size = EMAC_RRD_SIZE;
638 adpt->tpd_size = EMAC_TPD_SIZE;
639 adpt->rfd_size = EMAC_RFD_SIZE;
641 netdev->netdev_ops = &emac_netdev_ops;
643 emac_init_adapter(adpt);
645 /* init external phy */
646 ret = emac_phy_config(pdev, adpt);
648 goto err_undo_clocks;
650 /* init internal sgmii phy */
651 ret = emac_sgmii_config(pdev, adpt);
653 goto err_undo_mdiobus;
656 ret = emac_clks_phase2_init(pdev, adpt);
658 dev_err(&pdev->dev, "could not initialize clocks\n");
659 goto err_undo_mdiobus;
662 emac_mac_reset(adpt);
664 /* set hw features */
665 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
666 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
667 NETIF_F_HW_VLAN_CTAG_TX;
668 netdev->hw_features = netdev->features;
670 netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM |
671 NETIF_F_TSO | NETIF_F_TSO6;
673 INIT_WORK(&adpt->work_thread, emac_work_thread);
675 /* Initialize queues */
676 emac_mac_rx_tx_ring_init_all(pdev, adpt);
678 netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
681 ret = register_netdev(netdev);
683 dev_err(&pdev->dev, "could not register net device\n");
687 reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL);
688 devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT;
689 revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT;
690 reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION);
692 netif_info(adpt, probe, netdev,
693 "hardware id %d.%d, hardware version %d.%d.%d\n",
695 (reg & MAJOR_BMSK) >> MAJOR_SHFT,
696 (reg & MINOR_BMSK) >> MINOR_SHFT,
697 (reg & STEP_BMSK) >> STEP_SHFT);
702 netif_napi_del(&adpt->rx_q.napi);
704 mdiobus_unregister(adpt->mii_bus);
706 emac_clks_teardown(adpt);
713 static int emac_remove(struct platform_device *pdev)
715 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
716 struct emac_adapter *adpt = netdev_priv(netdev);
718 unregister_netdev(netdev);
719 netif_napi_del(&adpt->rx_q.napi);
721 emac_clks_teardown(adpt);
723 mdiobus_unregister(adpt->mii_bus);
725 dev_set_drvdata(&pdev->dev, NULL);
730 static struct platform_driver emac_platform_driver = {
732 .remove = emac_remove,
734 .owner = THIS_MODULE,
736 .of_match_table = emac_dt_match,
740 module_platform_driver(emac_platform_driver);
742 MODULE_LICENSE("GPL v2");
743 MODULE_ALIAS("platform:qcom-emac");