/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2
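
/* Platform MEM resource indices: the per-port ENET CSRs, the shared ring
 * CSRs and the ring command region are picked up in this order by
 * xgene_enet_get_resources() below.
 */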
static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}
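
/* Fill the free pool with fresh 2KB receive buffers and hand them to the
 * hardware.  The tail index wraps with a power-of-two mask (slots - 1), so
 * ring slot counts are assumed to be powers of two throughout this driver.
 */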
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* Hardware expects descriptor in little endian format */
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}
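
/* RX interrupt handler: the line is masked here and re-enabled only once
 * xgene_enet_napi() polls below budget, giving the usual interrupt/NAPI
 * handoff without re-entering the handler while polling is in progress.
 */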
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb))
		dev_kfree_skb_any(skb);
	else
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");

	return ret;
}
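
/* Build the 64-bit work message (hopinfo) that tells the hardware how to
 * parse the outgoing frame: L3/L4 header lengths, checksum-offload enable
 * and, for TCP when TSO is active, the ET bit so the hardware segments
 * the payload itself.
 */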
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}
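
/* Descriptors are stored as pairs of 64-bit words; the idx ^ 1 below picks
 * the word order the ring hardware appears to expect within each 16-byte
 * half (the same trick is used for exp_desc[i ^ 1] in the TX path).
 */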
static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}
static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}
static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
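
/* Map an skb onto TX descriptors.  The linear head lands in the primary
 * descriptor; up to four more buffers fit in the extension descriptor, and
 * anything beyond that spills into an out-of-line exp_bufs list that is
 * linked in via the LL (link list) bit and the LL_LEN/LL_BYTES fields.
 */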
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);
	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
	count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;
			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}
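
/* The tx_level/txc_level counters only ever grow, so their difference is
 * the number of in-flight descriptors; the wraparound fix-up below keeps
 * that difference correct when tx_level overflows first.  tx_qcnt_hi
 * (slots - 128) is the high-water mark at which the queue is stopped.
 */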
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
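
/* RX descriptors carry the free pool queue number (FPQNUM) of the buffer
 * pool the frame was taken from; TX completions leave it zero, which is
 * what the test below relies on to tell the two apart.
 */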
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}
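
/* Drain up to "budget" descriptors from a ring, dispatching each slot to
 * the RX or TX-completion path.  A negative budget (used from
 * xgene_enet_close()) effectively drains until the ring is empty.
 */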
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	return ret;
}
static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}
static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}
static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}
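
/* Each ring owns a fixed-size window inside the ring command region; the
 * window stride is 1 << num_ring_id_shift, a property of the ring
 * generation (v1 vs v2) exposed through ring_ops.
 */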
static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}
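
/* Ring ids encode the owner (CPU, ETH0, ETH1) in the upper bits and a
 * per-owner buffer number in the low six bits.
 */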
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int i, ret, size;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		if (!pdata->cq_cnt) {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
				 ndev->name, i);
		}
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
							&dma_exp_bufs,
							GFP_KERNEL);
		if (!tx_ring->exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
			snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
				 ndev->name, i);
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};
#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}
}
#endif
static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}
static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}
static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret) {
			xgene_enet_delete_desc_rings(pdata);
			return ret;
		}
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			return ret;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->mac_ops->init(pdata);

	return ret;
}
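
/* Bind the MAC, port and (for XFI) classifier ops that match the PHY
 * interface mode, then pick the buffer/ring number ranges for this port
 * so that two ports on the same SoC never collide.
 */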
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		pdata->rxq_cnt = XGENE_NUM_RX_RING;
		pdata->txq_cnt = XGENE_NUM_TX_RING;
		pdata->cq_cnt = XGENE_NUM_TXC_RING;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}
static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_del(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_del(napi);
	}
}
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	const struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		ret = xgene_enet_mdio_config(pdata);
		if (ret)
			goto err;
	} else {
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
	}

	xgene_enet_napi_add(pdata);
	return 0;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}
static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	const struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif
static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");