drivers: net: xgene: XFI PCS reset when link is down
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
#define RES_ENET_CSR    0
#define RES_RING_CSR    1
#define RES_RING_CMD    2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        int i;

        for (i = 0; i < buf_pool->slots; i++) {
                raw_desc = &buf_pool->raw_desc16[i];

                /* Hardware expects descriptor in little endian format */
                raw_desc->m0 = cpu_to_le64(i |
                                SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
                                SET_VAL(STASH, 3));
        }
}

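/*
 * Repopulate the buffer pool with freshly allocated receive buffers.
 * Each free-pool descriptor gets the DMA address of a new skb, and the
 * number of replenished slots is posted to the ring in one command.
 */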
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        pdata = netdev_priv(ndev);
        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                buf_pool->rx_skb[tail] = skb;

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
        const struct ethhdr *eth = data;

        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct device *dev = ndev_to_dev(buf_pool->ndev);
        struct xgene_enet_raw_desc16 *raw_desc;
        dma_addr_t dma_addr;
        int i;

        /* Free up the buffers held by hardware */
        for (i = 0; i < buf_pool->slots; i++) {
                if (buf_pool->rx_skb[i]) {
                        dev_kfree_skb_any(buf_pool->rx_skb[i]);

                        raw_desc = &buf_pool->raw_desc16[i];
                        dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
                        dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
                                         DMA_FROM_DEVICE);
                }
        }
}

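/*
 * RX interrupts are masked with disable_irq_nosync() while the ring is
 * being polled; xgene_enet_napi() re-enables the line once a poll
 * consumes less than its budget.
 */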
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
        struct xgene_enet_desc_ring *rx_ring = data;

        if (napi_schedule_prep(&rx_ring->napi)) {
                disable_irq_nosync(irq);
                __napi_schedule(&rx_ring->napi);
        }

        return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
                                    struct xgene_enet_raw_desc *raw_desc)
{
        struct sk_buff *skb;
        struct device *dev;
        skb_frag_t *frag;
        dma_addr_t *frag_dma_addr;
        u16 skb_index;
        u8 status;
        int i, ret = 0;

        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = cp_ring->cp_skb[skb_index];
        frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

        dev = ndev_to_dev(cp_ring->ndev);
        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         skb_headlen(skb),
                         DMA_TO_DEVICE);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
                               DMA_TO_DEVICE);
        }

        /* Checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
                                       status);
                ret = -EIO;
        }

        if (likely(skb)) {
                dev_kfree_skb_any(skb);
        } else {
                netdev_err(cp_ring->ndev, "completion skb is NULL\n");
                ret = -EIO;
        }

        return ret;
}

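/*
 * Build the 64-bit "work message" (hopinfo) for a TX descriptor: header
 * lengths, the checksum-offload enables and, when TSO applies, the ET
 * bit.  Returning 0 tells the caller to drop the packet.
 */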
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
        struct net_device *ndev = skb->dev;
        struct iphdr *iph;
        u8 l3hlen = 0, l4hlen = 0;
        u8 ethhdr, proto = 0, csum_enable = 0;
        u64 hopinfo = 0;
        u32 hdr_len, mss = 0;
        u32 i, len, nr_frags;

        ethhdr = xgene_enet_hdr_len(skb->data);

        if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
            unlikely(skb->protocol != htons(ETH_P_8021Q)))
                goto out;

        if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
                goto out;

        iph = ip_hdr(skb);
        if (unlikely(ip_is_fragment(iph)))
                goto out;

        if (likely(iph->protocol == IPPROTO_TCP)) {
                l4hlen = tcp_hdrlen(skb) >> 2;
                csum_enable = 1;
                proto = TSO_IPPROTO_TCP;
                if (ndev->features & NETIF_F_TSO) {
                        hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
                        mss = skb_shinfo(skb)->gso_size;

                        if (skb_is_nonlinear(skb)) {
                                len = skb_headlen(skb);
                                nr_frags = skb_shinfo(skb)->nr_frags;

                                for (i = 0; i < 2 && i < nr_frags; i++)
                                        len += skb_shinfo(skb)->frags[i].size;

                                /* HW requires the headers to fit within the
                                 * first three buffers
                                 */
                                if (unlikely(hdr_len > len)) {
                                        if (skb_linearize(skb))
                                                return 0;
                                }
                        }

                        if (!mss || ((skb->len - hdr_len) <= mss))
                                goto out;

                        hopinfo |= SET_BIT(ET);
                }
        } else if (iph->protocol == IPPROTO_UDP) {
                l4hlen = UDP_HDR_SIZE;
                csum_enable = 1;
        }
out:
        l3hlen = ip_hdrlen(skb) >> 2;
        hopinfo |= SET_VAL(TCPHDR, l4hlen) |
                   SET_VAL(IPHDR, l3hlen) |
                   SET_VAL(ETHHDR, ethhdr) |
                   SET_VAL(EC, csum_enable) |
                   SET_VAL(IS, proto) |
                   SET_BIT(IC) |
                   SET_BIT(TYPE_ETH_WORK_MESSAGE);

        return hopinfo;
}

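/*
 * A buffer length of exactly 16KB does not fit in the BUFDATALEN field;
 * it is encoded as 0, which the hardware appears to treat as the
 * maximum (16KB) buffer length.
 */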
static u16 xgene_enet_encode_len(u16 len)
{
        return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
        desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
                                    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
        __le64 *exp_bufs;

        exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
        memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
        ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

        return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
        return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

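/*
 * Lay out a TX packet across descriptors: the linear head goes in the
 * primary descriptor, the first few fragments in an expanded (NV)
 * descriptor, and any remainder spills into a separate exp_bufs array
 * whose DMA address is written into the expanded descriptor (the LL
 * path).  Fragments larger than 16KB are split into 16KB pieces.
 */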
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
                                    struct sk_buff *skb)
{
        struct device *dev = ndev_to_dev(tx_ring->ndev);
        struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        __le64 *exp_desc = NULL, *exp_bufs = NULL;
        dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
        skb_frag_t *frag;
        u16 tail = tx_ring->tail;
        u64 hopinfo;
        u32 len, hw_len;
        u8 ll = 0, nv = 0, idx = 0;
        bool split = false;
        u32 size, offset, ell_bytes = 0;
        u32 i, fidx, nr_frags, count = 1;

        raw_desc = &tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

        hopinfo = xgene_enet_work_msg(skb);
        if (!hopinfo)
                return -EINVAL;
        raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
                                   hopinfo);

        len = skb_headlen(skb);
        hw_len = xgene_enet_encode_len(len);

        dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                netdev_err(tx_ring->ndev, "DMA mapping error\n");
                return -EINVAL;
        }

        /* Hardware expects descriptor in little endian format */
        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                   SET_VAL(BUFDATALEN, hw_len) |
                                   SET_BIT(COHERENT));

        if (!skb_is_nonlinear(skb))
                goto out;

        /* scatter gather */
        nv = 1;
        exp_desc = (void *)&tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = nr_frags; i < 4; i++)
                exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

        frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

        for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
                if (!split) {
                        frag = &skb_shinfo(skb)->frags[fidx];
                        size = skb_frag_size(frag);
                        offset = 0;

                        pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
                                                     DMA_TO_DEVICE);
                        if (dma_mapping_error(dev, pbuf_addr))
                                return -EINVAL;

                        frag_dma_addr[fidx] = pbuf_addr;
                        fidx++;

                        if (size > BUFLEN_16K)
                                split = true;
                }

                if (size > BUFLEN_16K) {
                        len = BUFLEN_16K;
                        size -= BUFLEN_16K;
                } else {
                        len = size;
                        split = false;
                }

                dma_addr = pbuf_addr + offset;
                hw_len = xgene_enet_encode_len(len);

                switch (i) {
                case 0:
                case 1:
                case 2:
                        xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
                        break;
                case 3:
                        if (split || (fidx != nr_frags)) {
                                exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
                                xgene_set_addr_len(exp_bufs, idx, dma_addr,
                                                   hw_len);
                                idx++;
                                ell_bytes += len;
                        } else {
                                xgene_set_addr_len(exp_desc, i, dma_addr,
                                                   hw_len);
                        }
                        break;
                default:
                        xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
                        idx++;
                        ell_bytes += len;
                        break;
                }

                if (split)
                        offset += BUFLEN_16K;
        }
        count++;

        if (idx) {
                ll = 1;
                dma_addr = dma_map_single(dev, exp_bufs,
                                          sizeof(u64) * MAX_EXP_BUFFS,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }
                i = ell_bytes >> LL_BYTES_LSB_LEN;
                exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                          SET_VAL(LL_BYTES_MSB, i) |
                                          SET_VAL(LL_LEN, idx));
                raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
        }

out:
        raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
                                   SET_VAL(USERINFO, tx_ring->tail));
        tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
        pdata->tx_level[tx_ring->cp_ring->index] += count;
        tx_ring->tail = tail;

        return count;
}

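/*
 * tx_level and txc_level are free-running counters of descriptors queued
 * and completed, respectively; the wrap-around correction below keeps
 * their difference (the ring occupancy) valid across counter overflow.
 */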
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *tx_ring;
        int index = skb->queue_mapping;
        u32 tx_level = pdata->tx_level[index];
        int count;

        tx_ring = pdata->tx_ring[index];
        if (tx_level < pdata->txc_level[index])
                tx_level += ((typeof(pdata->tx_level[index]))~0U);

        if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
                netif_stop_subqueue(ndev, index);
                return NETDEV_TX_BUSY;
        }

        if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
                return NETDEV_TX_OK;

        count = xgene_enet_setup_tx_desc(tx_ring, skb);
        if (count <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        skb_tx_timestamp(skb);

        tx_ring->tx_packets++;
        tx_ring->tx_bytes += skb->len;

        pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        if (!ip_is_fragment(iph) ||
            (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc)
{
        struct net_device *ndev;
        struct device *dev;
        struct xgene_enet_desc_ring *buf_pool;
        u32 datalen, skb_index;
        struct sk_buff *skb;
        u8 status;
        int ret = 0;

        ndev = rx_ring->ndev;
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;

        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = buf_pool->rx_skb[skb_index];
        buf_pool->rx_skb[skb_index] = NULL;

        /* checking for error */
        status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
                 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
                ret = -EIO;
                goto out;
        }

        /* strip off CRC as HW isn't doing this */
        datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
        datalen = (datalen & DATALEN_MASK) - 4;
        prefetch(skb->data - NET_IP_ALIGN);
        skb_put(skb, datalen);

        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, ndev);
        if (likely((ndev->features & NETIF_F_IP_CSUM) &&
                   skb->protocol == htons(ETH_P_IP))) {
                xgene_enet_skip_csum(skb);
        }

        rx_ring->rx_packets++;
        rx_ring->rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
out:
        if (--rx_ring->nbufpool == 0) {
                ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
                rx_ring->nbufpool = NUM_BUFPOOL;
        }

        return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
        return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

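/*
 * Drain up to 'budget' descriptors from a ring.  A set NV bit means the
 * message continues into the next slot (an expanded descriptor), so both
 * slots must be valid before the message is consumed.  RX frames and TX
 * completions share the same ring when no dedicated completion queue
 * exists.
 */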
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                                   int budget)
{
        struct net_device *ndev = ring->ndev;
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_raw_desc *raw_desc, *exp_desc;
        u16 head = ring->head;
        u16 slots = ring->slots - 1;
        int ret, desc_count, count = 0, processed = 0;
        bool is_completion;

        do {
                raw_desc = &ring->raw_desc[head];
                desc_count = 0;
                is_completion = false;
                exp_desc = NULL;
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;

                /* read fpqnum field after dataaddr field */
                dma_rmb();
                if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
                        head = (head + 1) & slots;
                        exp_desc = &ring->raw_desc[head];

                        if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
                                head = (head - 1) & slots;
                                break;
                        }
                        dma_rmb();
                        count++;
                        desc_count++;
                }
                if (is_rx_desc(raw_desc)) {
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                } else {
                        ret = xgene_enet_tx_completion(ring, raw_desc);
                        is_completion = true;
                }
                xgene_enet_mark_desc_slot_empty(raw_desc);
                if (exp_desc)
                        xgene_enet_mark_desc_slot_empty(exp_desc);

                head = (head + 1) & slots;
                count++;
                desc_count++;
                processed++;
                if (is_completion)
                        pdata->txc_level[ring->index] += desc_count;

                if (ret)
                        break;
        } while (--budget);

        if (likely(count)) {
                pdata->ring_ops->wr_cmd(ring, -count);
                ring->head = head;

                if (__netif_subqueue_stopped(ndev, ring->index))
                        netif_start_subqueue(ndev, ring->index);
        }

        return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
        struct xgene_enet_desc_ring *ring;
        int processed;

        ring = container_of(napi, struct xgene_enet_desc_ring, napi);
        processed = xgene_enet_process_ring(ring, budget);

        if (processed != budget) {
                napi_complete(napi);
                enable_irq(ring->irq);
        }

        return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct netdev_queue *txq;
        int i;

        pdata->mac_ops->reset(pdata);

        for (i = 0; i < pdata->txq_cnt; i++) {
                txq = netdev_get_tx_queue(ndev, i);
                txq->trans_start = jiffies;
                netif_tx_start_queue(txq);
        }
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (!pdata->cq_cnt) {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
                                 ndev->name);
                } else {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
                                 ndev->name, i);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
                         ndev->name, i);
        }
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        int ret = 0, i;

        xgene_enet_set_irq_name(ndev);
        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata;
        struct xgene_enet_desc_ring *ring;
        struct device *dev;
        int i;

        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_enable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_enable(napi);
        }
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_disable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_disable(napi);
        }
}

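/*
 * Bring-up order: size the queues, then enable NAPI and IRQs before the
 * MAC, so packets arriving as soon as RX is enabled can be serviced.
 * Without an attached PHY the link state is polled via delayed work.
 */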
static int xgene_enet_open(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int ret;

        ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
        if (ret)
                return ret;

        ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
        if (ret)
                return ret;

        xgene_enet_napi_enable(pdata);
        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;

        if (pdata->phy_dev) {
                phy_start(pdata->phy_dev);
        } else {
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
                netif_carrier_off(ndev);
        }

        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);
        netif_tx_start_all_queues(ndev);

        return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int i;

        netif_tx_stop_all_queues(ndev);
        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);

        if (pdata->phy_dev)
                phy_stop(pdata->phy_dev);
        else
                cancel_delayed_work_sync(&pdata->link_work);

        xgene_enet_free_irq(ndev);
        xgene_enet_napi_disable(pdata);
        for (i = 0; i < pdata->rxq_cnt; i++)
                xgene_enet_process_ring(pdata->rx_ring[i], -1);

        return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);

        pdata->ring_ops->clear(ring);
        dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *buf_pool;
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        xgene_enet_delete_ring(ring);
                        pdata->port_ops->clear(pdata, ring);
                        if (pdata->cq_cnt)
                                xgene_enet_delete_ring(ring->cp_ring);
                        pdata->tx_ring[i] = NULL;
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        buf_pool = ring->buf_pool;
                        xgene_enet_delete_bufpool(buf_pool);
                        xgene_enet_delete_ring(buf_pool);
                        pdata->port_ops->clear(pdata, buf_pool);
                        xgene_enet_delete_ring(ring);
                        pdata->rx_ring[i] = NULL;
                }
        }
}

static int xgene_enet_get_ring_size(struct device *dev,
                                    enum xgene_enet_ring_cfgsize cfgsize)
{
        int size = -EINVAL;

        switch (cfgsize) {
        case RING_CFGSIZE_512B:
                size = 0x200;
                break;
        case RING_CFGSIZE_2KB:
                size = 0x800;
                break;
        case RING_CFGSIZE_16KB:
                size = 0x4000;
                break;
        case RING_CFGSIZE_64KB:
                size = 0x10000;
                break;
        case RING_CFGSIZE_512KB:
                size = 0x80000;
                break;
        default:
                dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
                break;
        }

        return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        if (!ring)
                return;

        dev = ndev_to_dev(ring->ndev);
        pdata = netdev_priv(ring->ndev);

        if (ring->desc_addr) {
                pdata->ring_ops->clear(ring);
                dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        if (ring->cp_ring && ring->cp_ring->cp_skb)
                                devm_kfree(dev, ring->cp_ring->cp_skb);
                        if (ring->cp_ring && pdata->cq_cnt)
                                xgene_enet_free_desc_ring(ring->cp_ring);
                        xgene_enet_free_desc_ring(ring);
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        if (ring->buf_pool) {
                                if (ring->buf_pool->rx_skb)
                                        devm_kfree(dev, ring->buf_pool->rx_skb);
                                xgene_enet_free_desc_ring(ring->buf_pool);
                        }
                        xgene_enet_free_desc_ring(ring);
                }
        }
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
                                 struct xgene_enet_desc_ring *ring)
{
        if ((pdata->enet_id == XGENE_ENET2) &&
            (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
                return true;
        }

        return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
                                              struct xgene_enet_desc_ring *ring)
{
        u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

        return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        void *irq_mbox_addr;
        int size;

        size = xgene_enet_get_ring_size(dev, cfgsize);
        if (size < 0)
                return NULL;

        ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
                            GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;
        ring->num = ring_num;
        ring->cfgsize = cfgsize;
        ring->id = ring_id;

        ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
                                              GFP_KERNEL | __GFP_ZERO);
        if (!ring->desc_addr) {
                devm_kfree(dev, ring);
                return NULL;
        }
        ring->size = size;

        if (is_irq_mbox_required(pdata, ring)) {
                irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
                                                    &ring->irq_mbox_dma,
                                                    GFP_KERNEL | __GFP_ZERO);
                if (!irq_mbox_addr) {
                        dmam_free_coherent(dev, size, ring->desc_addr,
                                           ring->dma);
                        devm_kfree(dev, ring);
                        return NULL;
                }
                ring->irq_mbox_addr = irq_mbox_addr;
        }

        ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
        ring = pdata->ring_ops->setup(ring);
        netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);

        return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
        return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
        enum xgene_ring_owner owner;

        if (p->enet_id == XGENE_ENET1) {
                switch (p->phy_mode) {
                case PHY_INTERFACE_MODE_SGMII:
                        owner = RING_OWNER_ETH0;
                        break;
                default:
                        owner = (!p->port_id) ? RING_OWNER_ETH0 :
                                                RING_OWNER_ETH1;
                        break;
                }
        } else {
                owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
        }

        return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        u32 cpu_bufnum;
        int ret;

        ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

        return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

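/*
 * Per-queue ring topology: every RX queue owns a 16KB descriptor ring
 * plus a 2KB free-buffer pool; every TX queue owns a 16KB ring plus
 * either a dedicated completion (cp) ring or, when cq_cnt is zero, the
 * matching RX ring doubling as its completion ring.
 */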
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
        enum xgene_ring_owner owner;
        dma_addr_t dma_exp_bufs;
        u8 cpu_bufnum;
        u8 eth_bufnum = pdata->eth_bufnum;
        u8 bp_bufnum = pdata->bp_bufnum;
        u16 ring_num = pdata->ring_num;
        __le64 *exp_bufs;
        u16 ring_id;
        int i, ret, size;

        cpu_bufnum = xgene_start_cpu_bufnum(pdata);

        for (i = 0; i < pdata->rxq_cnt; i++) {
                /* allocate rx descriptor ring */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
                rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                      RING_CFGSIZE_16KB,
                                                      ring_id);
                if (!rx_ring) {
                        ret = -ENOMEM;
                        goto err;
                }

                /* allocate buffer pool for receiving packets */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
                buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                       RING_CFGSIZE_2KB,
                                                       ring_id);
                if (!buf_pool) {
                        ret = -ENOMEM;
                        goto err;
                }

                rx_ring->nbufpool = NUM_BUFPOOL;
                rx_ring->buf_pool = buf_pool;
                rx_ring->irq = pdata->irqs[i];
                buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
                                                sizeof(struct sk_buff *),
                                                GFP_KERNEL);
                if (!buf_pool->rx_skb) {
                        ret = -ENOMEM;
                        goto err;
                }

                buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
                rx_ring->buf_pool = buf_pool;
                pdata->rx_ring[i] = rx_ring;
        }

        for (i = 0; i < pdata->txq_cnt; i++) {
                /* allocate tx descriptor ring */
                owner = xgene_derive_ring_owner(pdata);
                ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
                tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                      RING_CFGSIZE_16KB,
                                                      ring_id);
                if (!tx_ring) {
                        ret = -ENOMEM;
                        goto err;
                }

                size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
                exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
                                               GFP_KERNEL | __GFP_ZERO);
                if (!exp_bufs) {
                        ret = -ENOMEM;
                        goto err;
                }
                tx_ring->exp_bufs = exp_bufs;

                pdata->tx_ring[i] = tx_ring;

                if (!pdata->cq_cnt) {
                        cp_ring = pdata->rx_ring[i];
                } else {
                        /* allocate tx completion descriptor ring */
                        ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
                                                         cpu_bufnum++);
                        cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                                              RING_CFGSIZE_16KB,
                                                              ring_id);
                        if (!cp_ring) {
                                ret = -ENOMEM;
                                goto err;
                        }

                        cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
                        cp_ring->index = i;
                }

                cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
                                               sizeof(struct sk_buff *),
                                               GFP_KERNEL);
                if (!cp_ring->cp_skb) {
                        ret = -ENOMEM;
                        goto err;
                }

                size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
                cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
                                                      size, GFP_KERNEL);
                if (!cp_ring->frag_dma_addr) {
                        devm_kfree(dev, cp_ring->cp_skb);
                        ret = -ENOMEM;
                        goto err;
                }

                tx_ring->cp_ring = cp_ring;
                tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
        }

        pdata->ring_ops->coalesce(pdata->tx_ring[0]);
        pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

        return 0;

err:
        xgene_enet_free_desc_rings(pdata);
        return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
                        struct net_device *ndev,
                        struct rtnl_link_stats64 *storage)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;
        struct xgene_enet_desc_ring *ring;
        int i;

        memset(stats, 0, sizeof(struct rtnl_link_stats64));
        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        stats->tx_packets += ring->tx_packets;
                        stats->tx_bytes += ring->tx_bytes;
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        stats->rx_packets += ring->rx_packets;
                        stats->rx_bytes += ring->rx_bytes;
                        stats->rx_errors += ring->rx_length_errors +
                                            ring->rx_crc_errors +
                                            ring->rx_frame_errors +
                                            ring->rx_fifo_errors;
                        stats->rx_dropped += ring->rx_dropped;
                }
        }
        memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

        return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        int ret;

        ret = eth_mac_addr(ndev, addr);
        if (ret)
                return ret;
        pdata->mac_ops->set_mac_addr(pdata);

        return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
        .ndo_open = xgene_enet_open,
        .ndo_stop = xgene_enet_close,
        .ndo_start_xmit = xgene_enet_start_xmit,
        .ndo_tx_timeout = xgene_enet_timeout,
        .ndo_get_stats64 = xgene_enet_get_stats64,
        .ndo_change_mtu = eth_change_mtu,
        .ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
                                   struct xgene_enet_pdata *pdata)
{
        acpi_status status;
        u64 temp;

        status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
        if (ACPI_FAILURE(status))
                pdata->port_id = 0;
        else
                pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
                                 struct xgene_enet_pdata *pdata)
{
        u32 id = 0;

        of_property_read_u32(dev->of_node, "port-id", &id);

        pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        int delay, ret;

        ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
        if (ret) {
                pdata->tx_delay = 4;
                return 0;
        }

        if (delay < 0 || delay > 7) {
                dev_err(dev, "Invalid tx-delay specified\n");
                return -EINVAL;
        }

        pdata->tx_delay = delay;

        return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        int delay, ret;

        ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
        if (ret) {
                pdata->rx_delay = 2;
                return 0;
        }

        if (delay < 0 || delay > 7) {
                dev_err(dev, "Invalid rx-delay specified\n");
                return -EINVAL;
        }

        pdata->rx_delay = delay;

        return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev = pdata->pdev;
        struct device *dev = &pdev->dev;
        int i, ret, max_irqs;

        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                max_irqs = 1;
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
                max_irqs = 2;
        else
                max_irqs = XGENE_MAX_ENET_IRQ;

        for (i = 0; i < max_irqs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                max_irqs = i;
                                pdata->rxq_cnt = max_irqs / 2;
                                pdata->txq_cnt = max_irqs / 2;
                                pdata->cq_cnt = max_irqs / 2;
                                break;
                        }
                        dev_err(dev, "Unable to get ENET IRQ\n");
                        ret = ret ? : -ENXIO;
                        return ret;
                }
                pdata->irqs[i] = ret;
        }

        return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
        int ret;

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
                return 0;

        if (!IS_ENABLED(CONFIG_MDIO_XGENE))
                return 0;

        ret = xgene_enet_phy_connect(pdata->ndev);
        if (!ret)
                pdata->mdio_driver = true;

        return 0;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
        struct platform_device *pdev;
        struct net_device *ndev;
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
        u32 offset;
        int ret = 0;

        pdev = pdata->pdev;
        dev = &pdev->dev;
        ndev = pdata->ndev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
        if (!res) {
                dev_err(dev, "Resource enet_csr not defined\n");
                return -ENODEV;
        }
        pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
        if (!pdata->base_addr) {
                dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
        if (!res) {
                dev_err(dev, "Resource ring_csr not defined\n");
                return -ENODEV;
        }
        pdata->ring_csr_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_csr_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
                return -ENOMEM;
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
        if (!res) {
                dev_err(dev, "Resource ring_cmd not defined\n");
                return -ENODEV;
        }
        pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
                                            resource_size(res));
        if (!pdata->ring_cmd_addr) {
                dev_err(dev, "Unable to retrieve ENET Ring command region\n");
                return -ENOMEM;
        }

        if (dev->of_node)
                xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
        else
                xgene_get_port_id_acpi(dev, pdata);
#endif

        if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
                eth_hw_addr_random(ndev);

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        pdata->phy_mode = device_get_phy_mode(dev);
        if (pdata->phy_mode < 0) {
                dev_err(dev, "Unable to get phy-connection-type\n");
                return pdata->phy_mode;
        }
        if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
                dev_err(dev, "Incorrect phy-connection-type specified\n");
                return -ENODEV;
        }

        ret = xgene_get_tx_delay(pdata);
        if (ret)
                return ret;

        ret = xgene_get_rx_delay(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_get_irqs(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_check_phy_handle(pdata);
        if (ret)
                return ret;

        pdata->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk)) {
                /* Firmware may have set up the clock already. */
                dev_info(dev, "clocks have been setup already\n");
        }

        if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
                base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
        else
                base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
                offset = (pdata->enet_id == XGENE_ENET1) ?
                          BLOCK_ETH_MAC_CSR_OFFSET :
                          X2_BLOCK_ETH_MAC_CSR_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + offset;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
                pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
        }
        pdata->rx_buff_cnt = NUM_PKT_BUF;

        return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_cle *enet_cle = &pdata->cle;
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
        u16 dst_ring_num;
        int i, ret;

        ret = pdata->port_ops->reset(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
                netdev_err(ndev, "Error in ring configuration\n");
                return ret;
        }

        /* setup buffer pool */
        for (i = 0; i < pdata->rxq_cnt; i++) {
                buf_pool = pdata->rx_ring[i]->buf_pool;
                xgene_enet_init_bufpool(buf_pool);
                ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
                if (ret)
                        goto err;
        }

        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
        buf_pool = pdata->rx_ring[0]->buf_pool;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                /* Initialize and Enable PreClassifier Tree */
                enet_cle->max_nodes = 512;
                enet_cle->max_dbptrs = 1024;
                enet_cle->parsers = 3;
                enet_cle->active_parser = PARSER_ALL;
                enet_cle->ptree.start_node = 0;
                enet_cle->ptree.start_dbptr = 0;
                enet_cle->jump_bytes = 8;
                ret = pdata->cle_ops->cle_init(pdata);
                if (ret) {
                        netdev_err(ndev, "Preclass Tree init error\n");
                        goto err;
                }
        } else {
                pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
        }

        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->mac_ops->init(pdata);

        return ret;

err:
        xgene_enet_delete_desc_rings(pdata);
        return ret;
}

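/*
 * Bind the MAC, port and ring-management ops that match the configured
 * PHY interface (RGMII, SGMII or XGMII), then derive the per-port buffer
 * and ring numbering for ENET1 versus ENET2 silicon.
 */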
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
        switch (pdata->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                pdata->mac_ops = &xgene_gmac_ops;
                pdata->port_ops = &xgene_gport_ops;
                pdata->rm = RM3;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 0;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                pdata->mac_ops = &xgene_sgmac_ops;
                pdata->port_ops = &xgene_sgport_ops;
                pdata->rm = RM1;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 1;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                pdata->cle_ops = &xgene_cle3in_ops;
                pdata->rm = RM0;
                if (!pdata->rxq_cnt) {
                        pdata->rxq_cnt = XGENE_NUM_RX_RING;
                        pdata->txq_cnt = XGENE_NUM_TX_RING;
                        pdata->cq_cnt = XGENE_NUM_TXC_RING;
                }
                break;
        }

        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        }
                        break;
                case 1:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
                                pdata->ring_num = XG_START_RING_NUM_1;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = START_BP_BUFNUM_1;
                                pdata->ring_num = START_RING_NUM_1;
                        }
                        break;
                default:
                        break;
                }
                pdata->ring_ops = &xgene_ring1_ops;
        } else {
                switch (pdata->port_id) {
                case 0:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                        pdata->ring_num = X2_START_RING_NUM_0;
                        break;
                case 1:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
                        pdata->ring_num = X2_START_RING_NUM_1;
                        break;
                default:
                        break;
                }
                pdata->rm = RM0;
                pdata->ring_ops = &xgene_ring2_ops;
        }
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }
}

static int xgene_enet_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        void (*link_state)(struct work_struct *);
        const struct of_device_id *of_id;
        int ret;

        ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
                                  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;
        xgene_enet_set_ethtool_ops(ndev);
        ndev->features |= NETIF_F_IP_CSUM |
                          NETIF_F_GSO |
                          NETIF_F_GRO |
                          NETIF_F_SG;

        of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
        if (of_id) {
                pdata->enet_id = (enum xgene_enet_id)of_id->data;
        }
#ifdef CONFIG_ACPI
        else {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
                if (acpi_id)
                        pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
        }
#endif
        if (!pdata->enet_id) {
                ret = -ENODEV;
                goto err;
        }

        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;

        xgene_enet_setup_ops(pdata);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                ndev->features |= NETIF_F_TSO;
                pdata->mss = XGENE_ENET_MSS;
        }
        ndev->hw_features = ndev->features;

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xgene_enet_init_hw(pdata);
        if (ret)
                goto err;

        link_state = pdata->mac_ops->link_state;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                INIT_DELAYED_WORK(&pdata->link_work, link_state);
        } else if (!pdata->mdio_driver) {
                if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                        ret = xgene_enet_mdio_config(pdata);
                else
                        INIT_DELAYED_WORK(&pdata->link_work, link_state);

                if (ret)
                        goto err1;
        }

        xgene_enet_napi_add(pdata);
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err2;
        }

        return 0;

err2:
        /*
         * If necessary, free_netdev() will call netif_napi_del() and undo
         * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
         */

        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);
err1:
        xgene_enet_delete_desc_rings(pdata);
err:
        free_netdev(ndev);
        return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        ndev = pdata->ndev;

        rtnl_lock();
        if (netif_running(ndev))
                dev_close(ndev);
        rtnl_unlock();

        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);

        unregister_netdev(ndev);
        pdata->port_ops->shutdown(pdata);
        xgene_enet_delete_desc_rings(pdata);
        free_netdev(ndev);

        return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;

        pdata = platform_get_drvdata(pdev);
        if (!pdata)
                return;

        if (!pdata->ndev)
                return;

        xgene_enet_remove(pdev);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
        { "APMC0D05", XGENE_ENET1},
        { "APMC0D30", XGENE_ENET1},
        { "APMC0D31", XGENE_ENET1},
        { "APMC0D3F", XGENE_ENET1},
        { "APMC0D26", XGENE_ENET2},
        { "APMC0D25", XGENE_ENET2},
        { }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
        {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
        {},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
        .driver = {
                .name = "xgene-enet",
                .of_match_table = of_match_ptr(xgene_enet_of_match),
                .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
        .shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");
1793MODULE_LICENSE("GPL");