drivers: net: xgene: Implement the backward compatibility with the old and new firmwa...
[linux-2.6-block.git] drivers/net/ethernet/apm/xgene/xgene_enet_main.c
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

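/* Pre-fill every 16-byte descriptor in the free pool ring: the slot index
 * sits in the low bits (read back later as USERINFO to locate the matching
 * rx_skb entry), alongside the destination ring number and a stash hint.
 */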
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

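/* Allocate and DMA-map nbuf receive buffers, post them to the hardware
 * buffer pool through the ring command interface, and advance the
 * software tail index.
 */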
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

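/* Destination ring id as encoded in descriptors: the ring manager number
 * in the upper bits, the ring number in the lower ten.
 */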
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

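/* Reclaim one transmitted buffer: unmap its DMA region, report any LERR
 * status code above 2 as an error, and free the SKB saved at transmit
 * time in cp_skb[].
 */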
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

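/* Build the 64-bit work message (hopinfo) for a TX descriptor: Ethernet,
 * IP and L4 header lengths plus the checksum-offload enable bits.
 * Checksum insertion is only requested for non-fragmented IPv4 TCP/UDP
 * and only when NETIF_F_IP_CSUM is enabled.
 */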
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 l3hlen, l4hlen = 0;
	u8 csum_enable = 0;
	u8 proto = 0;
	u8 ethhdr;
	u64 hopinfo;

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	ethhdr = xgene_enet_hdr_len(skb->data);
	hopinfo = SET_VAL(TCPHDR, l4hlen) |
		  SET_VAL(IPHDR, l3hlen) |
		  SET_VAL(ETHHDR, ethhdr) |
		  SET_VAL(EC, csum_enable) |
		  SET_VAL(IS, proto) |
		  SET_BIT(IC) |
		  SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}

static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pdata->ring_ops->wr_cmd(tx_ring, 1);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

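/* Deliver one received frame to the stack: unmap the buffer, drop it on
 * an LERR status above 2, trim the 4-byte CRC, hand it to GRO, and
 * replenish the buffer pool once every NUM_BUFPOOL frames.
 */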
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

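/* Service up to budget descriptors from a completion ring. RX frames and
 * TX completions share the ring; a non-zero FPQNUM marks an RX
 * descriptor. Processed slots are returned to hardware in one batch via
 * wr_cmd(ring, -count).
 */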
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_carrier_off(ndev);
	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}

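/* Only second-generation hardware (XGENE_ENET2) needs the interrupt
 * mailbox, and only for CPU-owned (host-side) rings.
 */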
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

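/* Ring ids encode the owner in the upper bits and the buffer number in
 * the low six bits.
 */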
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

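/* Create the RX descriptor ring, its backing buffer pool, the TX ring
 * and, when a separate completion IRQ is available (cq_cnt != 0), a
 * dedicated TX completion ring; otherwise TX completions are polled on
 * the RX ring. Also derives the queue-depth watermarks used for flow
 * control in start_xmit.
 */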
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

static int xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}

	return 0;
}

static int xgene_get_port_id_dt(struct device *dev,
				struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = of_property_read_u32(dev->of_node, "port-id", &id);
	if (ret) {
		pdata->port_id = 0;
		ret = 0;
	} else {
		pdata->port_id = id & BIT(0);
	}

	return ret;
}

static int xgene_get_mac_address(struct device *dev,
				 unsigned char *addr)
{
	int ret;

	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
	if (ret)
		ret = device_property_read_u8_array(dev, "mac-address",
						    addr, 6);
	if (ret)
		return -ENODEV;

	return ETH_ALEN;
}

static int xgene_get_phy_mode(struct device *dev)
{
	int i, ret;
	char *modestr;

	ret = device_property_read_string(dev, "phy-connection-type",
					  (const char **)&modestr);
	if (ret)
		ret = device_property_read_string(dev, "phy-mode",
						  (const char **)&modestr);
	if (ret)
		return -ENODEV;

	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
		if (!strcasecmp(modestr, phy_modes(i)))
			return i;
	}
	return -ENODEV;
}

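/* Gather all platform resources: the ENET CSR, ring CSR and ring command
 * regions, the port id and MAC address from DT or ACPI, the PHY mode,
 * the RX/TX completion IRQs and the clock, then derive the per-port
 * register block offsets from them.
 */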
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		ret = xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		ret = xgene_get_port_id_acpi(dev, pdata);
#endif
	if (ret)
		return ret;

	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = xgene_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		pdata->clk = NULL;
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

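/* Bring the hardware to a usable state: reset the port, create the
 * descriptor rings, seed the RX buffer pool, set up classifier bypass so
 * RX traffic lands in the default ring, and initialize the MAC.
 */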
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}

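/* Select the MAC, port and ring operations that match the PHY interface
 * mode (RGMII, SGMII or XGMII) and pick the per-port starting buffer and
 * ring numbers, which differ between first- and second-generation ENET.
 */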
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = START_CPU_BUFNUM_1;
			pdata->eth_bufnum = START_ETH_BUFNUM_1;
			pdata->bp_bufnum = START_BP_BUFNUM_1;
			pdata->ring_num = START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	xgene_enet_mdio_remove(pdata);
	xgene_enet_delete_desc_rings(pdata);
	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");