// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *          Ravi Patel <rapatel@apm.com>
 *          Keyur Chudgar <kchudgar@apm.com>
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR    0
#define RES_RING_CSR    1
#define RES_RING_CMD    2

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        int i;

        if (!buf_pool)
                return;

        for (i = 0; i < buf_pool->slots; i++) {
                raw_desc = &buf_pool->raw_desc16[i];

                /* Hardware expects descriptor in little endian format */
                raw_desc->m0 = cpu_to_le64(i |
                                SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
                                SET_VAL(STASH, 3));
        }
}

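/*
 * Decode the hardware BUFDATALEN field into a byte count. As inferred from
 * the masks below: with bit 14 clear, bits [13:0] carry a length in a 16KB
 * buffer (0 meaning the full 16KB); with bit 14 set and bits [13:12] clear,
 * bits [11:0] carry a length in a 4KB buffer (0 meaning 4KB); otherwise
 * bits [11:0] describe a 2KB buffer. The 0x7800 code appears to mark an
 * unused fragment slot and decodes to length zero.
 */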
static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
        u16 hw_len, mask;

        hw_len = GET_VAL(BUFDATALEN, bufdatalen);

        if (unlikely(hw_len == 0x7800)) {
                return 0;
        } else if (!(hw_len & BIT(14))) {
                mask = GENMASK(13, 0);
                return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
        } else if (!(hw_len & GENMASK(13, 12))) {
                mask = GENMASK(11, 0);
                return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
        } else {
                mask = GENMASK(11, 0);
                return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
        }
}

static u16 xgene_enet_set_data_len(u32 size)
{
        u16 hw_len;

        hw_len = (size == SIZE_4K) ? BIT(14) : 0;

        return hw_len;
}

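/*
 * Post nbuf page-sized buffers to the jumbo-frame page pool: each 16-byte
 * free-pool descriptor is filled with the page's DMA address and encoded
 * buffer length, and the whole batch is handed to hardware with a single
 * ring-command write. Returns -ENOMEM if allocation or DMA mapping fails.
 */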
static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
                                      u32 nbuf)
{
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        dma_addr_t dma_addr;
        struct device *dev;
        struct page *page;
        u32 slots, tail;
        u16 hw_len;
        int i;

        if (unlikely(!buf_pool))
                return 0;

        ndev = buf_pool->ndev;
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
        slots = buf_pool->slots - 1;
        tail = buf_pool->tail;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                page = dev_alloc_page();
                if (unlikely(!page))
                        return -ENOMEM;

                dma_addr = dma_map_page(dev, page, 0,
                                        PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev, dma_addr))) {
                        put_page(page);
                        return -ENOMEM;
                }

                hw_len = xgene_enet_set_data_len(PAGE_SIZE);
                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, hw_len) |
                                           SET_BIT(COHERENT));

                buf_pool->frag_page[tail] = page;
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        pdata = netdev_priv(ndev);

        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_STD_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                buf_pool->rx_skb[tail] = skb;

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
        const struct ethhdr *eth = data;

        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
        struct device *dev = ndev_to_dev(buf_pool->ndev);
        struct xgene_enet_raw_desc16 *raw_desc;
        dma_addr_t dma_addr;
        int i;

        /* Free up the buffers held by hardware */
        for (i = 0; i < buf_pool->slots; i++) {
                if (buf_pool->rx_skb[i]) {
                        dev_kfree_skb_any(buf_pool->rx_skb[i]);

                        raw_desc = &buf_pool->raw_desc16[i];
                        dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
                        dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
                                         DMA_FROM_DEVICE);
                }
        }
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
        struct device *dev = ndev_to_dev(buf_pool->ndev);
        dma_addr_t dma_addr;
        struct page *page;
        int i;

        /* Free up the buffers held by hardware */
        for (i = 0; i < buf_pool->slots; i++) {
                page = buf_pool->frag_page[i];
                if (page) {
                        dma_addr = buf_pool->frag_dma_addr[i];
                        dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
                        put_page(page);
                }
        }
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
        struct xgene_enet_desc_ring *rx_ring = data;

        if (napi_schedule_prep(&rx_ring->napi)) {
                disable_irq_nosync(irq);
                __napi_schedule(&rx_ring->napi);
        }

        return IRQ_HANDLED;
}

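/*
 * Reclaim one transmitted skb: unmap the linear part and every fragment,
 * release the TSO MSS register slot if the ET bit says one was used, and
 * count hardware-reported errors (LERR codes above 2) before freeing the
 * skb.
 */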
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
                                    struct xgene_enet_raw_desc *raw_desc)
{
        struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
        struct sk_buff *skb;
        struct device *dev;
        skb_frag_t *frag;
        dma_addr_t *frag_dma_addr;
        u16 skb_index;
        u8 mss_index;
        u8 status;
        int i;

        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = cp_ring->cp_skb[skb_index];
        frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

        dev = ndev_to_dev(cp_ring->ndev);
        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         skb_headlen(skb),
                         DMA_TO_DEVICE);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
                               DMA_TO_DEVICE);
        }

        if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
                mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
                spin_lock(&pdata->mss_lock);
                pdata->mss_refcnt[mss_index]--;
                spin_unlock(&pdata->mss_lock);
        }

        /* Checking for error */
        status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                cp_ring->tx_dropped++;
                cp_ring->tx_errors++;
        }

        if (likely(skb))
                dev_kfree_skb_any(skb);
        else
                netdev_err(cp_ring->ndev, "completion skb is NULL\n");

        return 0;
}

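/*
 * The MAC exposes a small set of MSS registers (NUM_MSS_REG) that TSO
 * descriptors reference by index. Reuse a slot whose programmed MSS already
 * matches, otherwise claim one with a zero refcount and program it; returns
 * the slot index, or -EBUSY when every slot is pinned by in-flight packets.
 */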
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        int mss_index = -EBUSY;
        int i;

        spin_lock(&pdata->mss_lock);

        /* Reuse the slot if MSS matches */
        for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
                if (pdata->mss[i] == mss) {
                        pdata->mss_refcnt[i]++;
                        mss_index = i;
                }
        }

        /* Overwrite the slot with ref_count = 0 */
        for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
                if (!pdata->mss_refcnt[i]) {
                        pdata->mss_refcnt[i]++;
                        pdata->mac_ops->set_mss(pdata, mss, i);
                        pdata->mss[i] = mss;
                        mss_index = i;
                }
        }

        spin_unlock(&pdata->mss_lock);

        return mss_index;
}

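/*
 * Build the "hopinfo" word of the hardware work message for an egress
 * frame: Ethernet header length in bytes, IP and TCP header lengths in
 * 32-bit words, checksum-enable and protocol bits, and, for TSO, the ET
 * bit plus the MSS register index from xgene_enet_setup_mss(). If the
 * headers would span more than the three buffers the hardware can parse,
 * the skb is linearized first.
 */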
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
        struct net_device *ndev = skb->dev;
        struct iphdr *iph;
        u8 l3hlen = 0, l4hlen = 0;
        u8 ethhdr, proto = 0, csum_enable = 0;
        u32 hdr_len, mss = 0;
        u32 i, len, nr_frags;
        int mss_index;

        ethhdr = xgene_enet_hdr_len(skb->data);

        if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
            unlikely(skb->protocol != htons(ETH_P_8021Q)))
                goto out;

        if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
                goto out;

        iph = ip_hdr(skb);
        if (unlikely(ip_is_fragment(iph)))
                goto out;

        if (likely(iph->protocol == IPPROTO_TCP)) {
                l4hlen = tcp_hdrlen(skb) >> 2;
                csum_enable = 1;
                proto = TSO_IPPROTO_TCP;
                if (ndev->features & NETIF_F_TSO) {
                        hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
                        mss = skb_shinfo(skb)->gso_size;

                        if (skb_is_nonlinear(skb)) {
                                len = skb_headlen(skb);
                                nr_frags = skb_shinfo(skb)->nr_frags;

                                for (i = 0; i < 2 && i < nr_frags; i++)
                                        len += skb_frag_size(
                                                &skb_shinfo(skb)->frags[i]);
                                /* HW requires the headers to reside in the first 3 buffers */
                                if (unlikely(hdr_len > len)) {
                                        if (skb_linearize(skb))
                                                return 0;
                                }
                        }

                        if (!mss || ((skb->len - hdr_len) <= mss))
                                goto out;

                        mss_index = xgene_enet_setup_mss(ndev, mss);
                        if (unlikely(mss_index < 0))
                                return -EBUSY;

                        *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
                }
        } else if (iph->protocol == IPPROTO_UDP) {
                l4hlen = UDP_HDR_SIZE;
                csum_enable = 1;
        }
out:
        l3hlen = ip_hdrlen(skb) >> 2;
        *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
                    SET_VAL(IPHDR, l3hlen) |
                    SET_VAL(ETHHDR, ethhdr) |
                    SET_VAL(EC, csum_enable) |
                    SET_VAL(IS, proto) |
                    SET_BIT(IC) |
                    SET_BIT(TYPE_ETH_WORK_MESSAGE);

        return 0;
}

static u16 xgene_enet_encode_len(u16 len)
{
        return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
        desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
                                    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
        __le64 *exp_bufs;

        exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
        memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
        ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

        return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
        return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

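/*
 * Lay out a transmit descriptor chain. The linear part of the skb goes into
 * the main descriptor; for a non-linear skb a second, "expanded" descriptor
 * carries up to four more buffer pointers, stored with their 64-bit halves
 * swapped (the idx ^ 1 in xgene_set_addr_len). Fragments beyond that, and
 * the tail ends of fragments larger than 16KB (which must be split), spill
 * into a per-ring exp_bufs area that is DMA-mapped and linked from slot 2
 * of the expanded descriptor with the LL length and byte counts set.
 * Returns the number of ring slots consumed.
 */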
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
                                    struct sk_buff *skb)
{
        struct device *dev = ndev_to_dev(tx_ring->ndev);
        struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
        struct xgene_enet_raw_desc *raw_desc;
        __le64 *exp_desc = NULL, *exp_bufs = NULL;
        dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
        skb_frag_t *frag;
        u16 tail = tx_ring->tail;
        u64 hopinfo = 0;
        u32 len, hw_len;
        u8 ll = 0, nv = 0, idx = 0;
        bool split = false;
        u32 size, offset, ell_bytes = 0;
        u32 i, fidx, nr_frags, count = 1;
        int ret;

        raw_desc = &tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

        ret = xgene_enet_work_msg(skb, &hopinfo);
        if (ret)
                return ret;

        raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
                                   hopinfo);

        len = skb_headlen(skb);
        hw_len = xgene_enet_encode_len(len);

        dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                netdev_err(tx_ring->ndev, "DMA mapping error\n");
                return -EINVAL;
        }

        /* Hardware expects descriptor in little endian format */
        raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                   SET_VAL(BUFDATALEN, hw_len) |
                                   SET_BIT(COHERENT));

        if (!skb_is_nonlinear(skb))
                goto out;

        /* scatter gather */
        nv = 1;
        exp_desc = (void *)&tx_ring->raw_desc[tail];
        tail = (tail + 1) & (tx_ring->slots - 1);
        memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = nr_frags; i < 4; i++)
                exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

        frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

        for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
                if (!split) {
                        frag = &skb_shinfo(skb)->frags[fidx];
                        size = skb_frag_size(frag);
                        offset = 0;

                        pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
                                                     DMA_TO_DEVICE);
                        if (dma_mapping_error(dev, pbuf_addr))
                                return -EINVAL;

                        frag_dma_addr[fidx] = pbuf_addr;
                        fidx++;

                        if (size > BUFLEN_16K)
                                split = true;
                }

                if (size > BUFLEN_16K) {
                        len = BUFLEN_16K;
                        size -= BUFLEN_16K;
                } else {
                        len = size;
                        split = false;
                }

                dma_addr = pbuf_addr + offset;
                hw_len = xgene_enet_encode_len(len);

                switch (i) {
                case 0:
                case 1:
                case 2:
                        xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
                        break;
                case 3:
                        if (split || (fidx != nr_frags)) {
                                exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
                                xgene_set_addr_len(exp_bufs, idx, dma_addr,
                                                   hw_len);
                                idx++;
                                ell_bytes += len;
                        } else {
                                xgene_set_addr_len(exp_desc, i, dma_addr,
                                                   hw_len);
                        }
                        break;
                default:
                        xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
                        idx++;
                        ell_bytes += len;
                        break;
                }

                if (split)
                        offset += BUFLEN_16K;
        }
        count++;

        if (idx) {
                ll = 1;
                dma_addr = dma_map_single(dev, exp_bufs,
                                          sizeof(u64) * MAX_EXP_BUFFS,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }
                i = ell_bytes >> LL_BYTES_LSB_LEN;
                exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                          SET_VAL(LL_BYTES_MSB, i) |
                                          SET_VAL(LL_LEN, idx));
                raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
        }

out:
        raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
                                   SET_VAL(USERINFO, tx_ring->tail));
        tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
        pdata->tx_level[tx_ring->cp_ring->index] += count;
        tx_ring->tail = tail;

        return count;
}

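/*
 * tx_level counts descriptors queued and txc_level descriptors completed;
 * both wrap at the type's maximum, so tx_level is biased up before the
 * subtraction when it has wrapped past txc_level. When the difference
 * exceeds tx_qcnt_hi the subqueue is stopped until completions catch up.
 */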
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *tx_ring;
        int index = skb->queue_mapping;
        u32 tx_level = pdata->tx_level[index];
        int count;

        tx_ring = pdata->tx_ring[index];
        if (tx_level < pdata->txc_level[index])
                tx_level += ((typeof(pdata->tx_level[index]))~0U);

        if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
                netif_stop_subqueue(ndev, index);
                return NETDEV_TX_BUSY;
        }

        if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
                return NETDEV_TX_OK;

        count = xgene_enet_setup_tx_desc(tx_ring, skb);
        if (count == -EBUSY)
                return NETDEV_TX_BUSY;

        if (count <= 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        skb_tx_timestamp(skb);

        tx_ring->tx_packets++;
        tx_ring->tx_bytes += skb->len;

        pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
}

static void xgene_enet_rx_csum(struct sk_buff *skb)
{
        struct net_device *ndev = skb->dev;
        struct iphdr *iph = ip_hdr(skb);

        if (!(ndev->features & NETIF_F_RXCSUM))
                return;

        if (skb->protocol != htons(ETH_P_IP))
                return;

        if (ip_is_fragment(iph))
                return;

        if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
                return;

        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
                                     struct xgene_enet_raw_desc *raw_desc,
                                     struct xgene_enet_raw_desc *exp_desc)
{
        __le64 *desc = (void *)exp_desc;
        dma_addr_t dma_addr;
        struct device *dev;
        struct page *page;
        u16 slots, head;
        u32 frag_size;
        int i;

        if (!buf_pool || !raw_desc || !exp_desc ||
            (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
                return;

        dev = ndev_to_dev(buf_pool->ndev);
        slots = buf_pool->slots - 1;
        head = buf_pool->head;

        for (i = 0; i < 4; i++) {
                frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
                if (!frag_size)
                        break;

                dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
                dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

                page = buf_pool->frag_page[head];
                put_page(page);

                buf_pool->frag_page[head] = NULL;
                head = (head + 1) & slots;
        }
        buf_pool->head = head;
}

/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
{
        if (status == INGRESS_CRC &&
            len >= (ETHER_STD_PACKET + 1) &&
            len <= (ETHER_STD_PACKET + 4) &&
            skb->protocol == htons(ETH_P_8021Q))
                return true;

        return false;
}

/* Errata 10GE_8 and ENET_11 - allow packet with length <= 64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
        if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
                if (ntohs(eth_hdr(skb)->h_proto) < 46)
                        return true;
        }

        return false;
}

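/*
 * Process one received frame. The first buffer always comes from the skb
 * buffer pool; if the NV bit is set the frame is a jumbo and up to four
 * additional page-pool fragments are attached from the expanded descriptor,
 * otherwise the trailing CRC is stripped here. Frames matching the
 * 10GE_8/10GE_10 errata are accepted despite the hardware error code;
 * genuine errors are counted and dropped. Both pools are replenished in
 * batches once enough buffers have been consumed.
 */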
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                               struct xgene_enet_raw_desc *raw_desc,
                               struct xgene_enet_raw_desc *exp_desc)
{
        struct xgene_enet_desc_ring *buf_pool, *page_pool;
        u32 datalen, frag_size, skb_index;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        struct device *dev;
        struct page *page;
        u16 slots, head;
        int i, ret = 0;
        __le64 *desc;
        u8 status;
        bool nv;

        ndev = rx_ring->ndev;
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(rx_ring->ndev);
        buf_pool = rx_ring->buf_pool;
        page_pool = rx_ring->page_pool;

        dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
                         XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
        skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
        skb = buf_pool->rx_skb[skb_index];
        buf_pool->rx_skb[skb_index] = NULL;

        datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
        skb_put(skb, datalen);
        prefetch(skb->data - NET_IP_ALIGN);
        skb->protocol = eth_type_trans(skb, ndev);

        /* checking for error */
        status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
                  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status)) {
                if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
                        pdata->false_rflr++;
                } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
                        pdata->vlan_rjbr++;
                } else {
                        dev_kfree_skb_any(skb);
                        xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
                        xgene_enet_parse_error(rx_ring, status);
                        rx_ring->rx_dropped++;
                        goto out;
                }
        }

        nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
        if (!nv) {
                /* strip off CRC as HW isn't doing this */
                datalen -= 4;
                goto skip_jumbo;
        }

        slots = page_pool->slots - 1;
        head = page_pool->head;
        desc = (void *)exp_desc;

        for (i = 0; i < 4; i++) {
                frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
                if (!frag_size)
                        break;

                dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
                dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

                page = page_pool->frag_page[head];
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
                                frag_size, PAGE_SIZE);

                datalen += frag_size;

                page_pool->frag_page[head] = NULL;
                head = (head + 1) & slots;
        }

        page_pool->head = head;
        rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
        skb_checksum_none_assert(skb);
        xgene_enet_rx_csum(skb);

        rx_ring->rx_packets++;
        rx_ring->rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);

out:
        if (rx_ring->npagepool <= 0) {
                ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
                rx_ring->npagepool = NUM_NXTBUFPOOL;
                if (ret)
                        return ret;
        }

        if (--rx_ring->nbufpool == 0) {
                ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
                rx_ring->nbufpool = NUM_BUFPOOL;
        }

        return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
        return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

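/*
 * NAPI worker shared by RX and TX-completion rings. A descriptor with the
 * NV bit set is followed by a second, "expanded" descriptor slot, so both
 * are consumed together; RX work is distinguished from TX completions by a
 * non-zero free-pool queue number (FPQNUM). Processed slots are returned to
 * hardware in one batch via a negative count to wr_cmd.
 */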
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                                   int budget)
{
        struct net_device *ndev = ring->ndev;
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_raw_desc *raw_desc, *exp_desc;
        u16 head = ring->head;
        u16 slots = ring->slots - 1;
        int ret, desc_count, count = 0, processed = 0;
        bool is_completion;

        do {
                raw_desc = &ring->raw_desc[head];
                desc_count = 0;
                is_completion = false;
                exp_desc = NULL;
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;

                /* read fpqnum field after dataaddr field */
                dma_rmb();
                if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
                        head = (head + 1) & slots;
                        exp_desc = &ring->raw_desc[head];

                        if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
                                head = (head - 1) & slots;
                                break;
                        }
                        dma_rmb();
                        count++;
                        desc_count++;
                }
                if (is_rx_desc(raw_desc)) {
                        ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
                } else {
                        ret = xgene_enet_tx_completion(ring, raw_desc);
                        is_completion = true;
                }
                xgene_enet_mark_desc_slot_empty(raw_desc);
                if (exp_desc)
                        xgene_enet_mark_desc_slot_empty(exp_desc);

                head = (head + 1) & slots;
                count++;
                desc_count++;
                processed++;
                if (is_completion)
                        pdata->txc_level[ring->index] += desc_count;

                if (ret)
                        break;
        } while (--budget);

        if (likely(count)) {
                pdata->ring_ops->wr_cmd(ring, -count);
                ring->head = head;

                if (__netif_subqueue_stopped(ndev, ring->index))
                        netif_start_subqueue(ndev, ring->index);
        }

        return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
        struct xgene_enet_desc_ring *ring;
        int processed;

        ring = container_of(napi, struct xgene_enet_desc_ring, napi);
        processed = xgene_enet_process_ring(ring, budget);

        if (processed != budget) {
                napi_complete_done(napi, processed);
                enable_irq(ring->irq);
        }

        return processed;
}

static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct netdev_queue *txq;
        int i;

        pdata->mac_ops->reset(pdata);

        for (i = 0; i < pdata->txq_cnt; i++) {
                txq = netdev_get_tx_queue(ndev, i);
                txq->trans_start = jiffies;
                netif_tx_start_queue(txq);
        }
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (!pdata->cq_cnt) {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
                                 ndev->name);
                } else {
                        snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
                                 ndev->name, i);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
                         ndev->name, i);
        }
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        int ret = 0, i;

        xgene_enet_set_irq_name(ndev);
        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
                }
        }

        return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata;
        struct xgene_enet_desc_ring *ring;
        struct device *dev;
        int i;

        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                ring = pdata->tx_ring[i]->cp_ring;
                irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                devm_free_irq(dev, ring->irq, ring);
        }
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_enable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_enable(napi);
        }
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                napi_disable(napi);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                napi_disable(napi);
        }
}

static int xgene_enet_open(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int ret;

        ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
        if (ret)
                return ret;

        ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
        if (ret)
                return ret;

        xgene_enet_napi_enable(pdata);
        ret = xgene_enet_register_irq(ndev);
        if (ret)
                return ret;

        if (ndev->phydev) {
                phy_start(ndev->phydev);
        } else {
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
                netif_carrier_off(ndev);
        }

        mac_ops->tx_enable(pdata);
        mac_ops->rx_enable(pdata);
        netif_tx_start_all_queues(ndev);

        return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
        int i;

        netif_tx_stop_all_queues(ndev);
        mac_ops->tx_disable(pdata);
        mac_ops->rx_disable(pdata);

        if (ndev->phydev)
                phy_stop(ndev->phydev);
        else
                cancel_delayed_work_sync(&pdata->link_work);

        xgene_enet_free_irq(ndev);
        xgene_enet_napi_disable(pdata);
        for (i = 0; i < pdata->rxq_cnt; i++)
                xgene_enet_process_ring(pdata->rx_ring[i], -1);

        return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);

        pdata->ring_ops->clear(ring);
        dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *buf_pool, *page_pool;
        struct xgene_enet_desc_ring *ring;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        xgene_enet_delete_ring(ring);
                        pdata->port_ops->clear(pdata, ring);
                        if (pdata->cq_cnt)
                                xgene_enet_delete_ring(ring->cp_ring);
                        pdata->tx_ring[i] = NULL;
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        page_pool = ring->page_pool;
                        if (page_pool) {
                                xgene_enet_delete_pagepool(page_pool);
                                xgene_enet_delete_ring(page_pool);
                                pdata->port_ops->clear(pdata, page_pool);
                        }

                        buf_pool = ring->buf_pool;
                        xgene_enet_delete_bufpool(buf_pool);
                        xgene_enet_delete_ring(buf_pool);
                        pdata->port_ops->clear(pdata, buf_pool);

                        xgene_enet_delete_ring(ring);
                        pdata->rx_ring[i] = NULL;
                }
        }
}

static int xgene_enet_get_ring_size(struct device *dev,
                                    enum xgene_enet_ring_cfgsize cfgsize)
{
        int size = -EINVAL;

        switch (cfgsize) {
        case RING_CFGSIZE_512B:
                size = 0x200;
                break;
        case RING_CFGSIZE_2KB:
                size = 0x800;
                break;
        case RING_CFGSIZE_16KB:
                size = 0x4000;
                break;
        case RING_CFGSIZE_64KB:
                size = 0x10000;
                break;
        case RING_CFGSIZE_512KB:
                size = 0x80000;
                break;
        default:
                dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
                break;
        }

        return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata;
        struct device *dev;

        if (!ring)
                return;

        dev = ndev_to_dev(ring->ndev);
        pdata = netdev_priv(ring->ndev);

        if (ring->desc_addr) {
                pdata->ring_ops->clear(ring);
                dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_desc_ring *page_pool;
        struct device *dev = &pdata->pdev->dev;
        struct xgene_enet_desc_ring *ring;
        void *p;
        int i;

        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
                        if (ring->cp_ring && ring->cp_ring->cp_skb)
                                devm_kfree(dev, ring->cp_ring->cp_skb);

                        if (ring->cp_ring && pdata->cq_cnt)
                                xgene_enet_free_desc_ring(ring->cp_ring);

                        xgene_enet_free_desc_ring(ring);
                }
        }

        for (i = 0; i < pdata->rxq_cnt; i++) {
                ring = pdata->rx_ring[i];
                if (ring) {
                        if (ring->buf_pool) {
                                if (ring->buf_pool->rx_skb)
                                        devm_kfree(dev, ring->buf_pool->rx_skb);

                                xgene_enet_free_desc_ring(ring->buf_pool);
                        }

                        page_pool = ring->page_pool;
                        if (page_pool) {
                                p = page_pool->frag_page;
                                if (p)
                                        devm_kfree(dev, p);

                                p = page_pool->frag_dma_addr;
                                if (p)
                                        devm_kfree(dev, p);
                        }

                        xgene_enet_free_desc_ring(ring);
                }
        }
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
                                 struct xgene_enet_desc_ring *ring)
{
        if ((pdata->enet_id == XGENE_ENET2) &&
            (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
                return true;
        }

        return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
                                              struct xgene_enet_desc_ring *ring)
{
        u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

        return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *ring;
        void *irq_mbox_addr;
        int size;

        size = xgene_enet_get_ring_size(dev, cfgsize);
        if (size < 0)
                return NULL;

        ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
                            GFP_KERNEL);
        if (!ring)
                return NULL;

        ring->ndev = ndev;
        ring->num = ring_num;
        ring->cfgsize = cfgsize;
        ring->id = ring_id;

        ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
                                              GFP_KERNEL | __GFP_ZERO);
        if (!ring->desc_addr) {
                devm_kfree(dev, ring);
                return NULL;
        }
        ring->size = size;

        if (is_irq_mbox_required(pdata, ring)) {
                irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
                                                    &ring->irq_mbox_dma,
                                                    GFP_KERNEL | __GFP_ZERO);
                if (!irq_mbox_addr) {
                        dmam_free_coherent(dev, size, ring->desc_addr,
                                           ring->dma);
                        devm_kfree(dev, ring);
                        return NULL;
                }
                ring->irq_mbox_addr = irq_mbox_addr;
        }

        ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
        ring = pdata->ring_ops->setup(ring);
        netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);

        return ring;
}

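/* A ring id packs the owner in the high bits and a 6-bit buffer number. */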
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
        return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
        enum xgene_ring_owner owner;

        if (p->enet_id == XGENE_ENET1) {
                switch (p->phy_mode) {
                case PHY_INTERFACE_MODE_SGMII:
                        owner = RING_OWNER_ETH0;
                        break;
                default:
                        owner = (!p->port_id) ? RING_OWNER_ETH0 :
                                                RING_OWNER_ETH1;
                        break;
                }
        } else {
                owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
        }

        return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
        struct device *dev = &pdata->pdev->dev;
        u32 cpu_bufnum;
        int ret;

        ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

        return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

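/*
 * Build the per-port ring topology: for each RX queue a CPU-owned
 * descriptor ring, an skb buffer pool and (queue counts permitting) a page
 * pool for jumbo frames; for each TX queue a descriptor ring with its
 * expanded-buffer area and a completion ring, which is dedicated when
 * cq_cnt is non-zero and otherwise shared with the matching RX ring.
 */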
1301 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1302 {
1303         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1304         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1305         struct xgene_enet_desc_ring *page_pool = NULL;
1306         struct xgene_enet_desc_ring *buf_pool = NULL;
1307         struct device *dev = ndev_to_dev(ndev);
1308         u8 eth_bufnum = pdata->eth_bufnum;
1309         u8 bp_bufnum = pdata->bp_bufnum;
1310         u16 ring_num = pdata->ring_num;
1311         enum xgene_ring_owner owner;
1312         dma_addr_t dma_exp_bufs;
1313         u16 ring_id, slots;
1314         __le64 *exp_bufs;
1315         int i, ret, size;
1316         u8 cpu_bufnum;
1317
1318         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1319
1320         for (i = 0; i < pdata->rxq_cnt; i++) {
1321                 /* allocate rx descriptor ring */
1322                 owner = xgene_derive_ring_owner(pdata);
1323                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1324                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1325                                                       RING_CFGSIZE_16KB,
1326                                                       ring_id);
1327                 if (!rx_ring) {
1328                         ret = -ENOMEM;
1329                         goto err;
1330                 }
1331
1332                 /* allocate buffer pool for receiving packets */
1333                 owner = xgene_derive_ring_owner(pdata);
1334                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1335                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1336                                                        RING_CFGSIZE_16KB,
1337                                                        ring_id);
1338                 if (!buf_pool) {
1339                         ret = -ENOMEM;
1340                         goto err;
1341                 }
1342
1343                 rx_ring->nbufpool = NUM_BUFPOOL;
1344                 rx_ring->npagepool = NUM_NXTBUFPOOL;
1345                 rx_ring->irq = pdata->irqs[i];
1346                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1347                                                 sizeof(struct sk_buff *),
1348                                                 GFP_KERNEL);
1349                 if (!buf_pool->rx_skb) {
1350                         ret = -ENOMEM;
1351                         goto err;
1352                 }
1353
1354                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1355                 rx_ring->buf_pool = buf_pool;
1356                 pdata->rx_ring[i] = rx_ring;
1357
1358                 if ((pdata->enet_id == XGENE_ENET1 &&  pdata->rxq_cnt > 4) ||
1359                     (pdata->enet_id == XGENE_ENET2 &&  pdata->rxq_cnt > 16)) {
1360                         break;
1361                 }
1362
1363                 /* allocate next buffer pool for jumbo packets */
1364                 owner = xgene_derive_ring_owner(pdata);
1365                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1366                 page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1367                                                         RING_CFGSIZE_16KB,
1368                                                         ring_id);
1369                 if (!page_pool) {
1370                         ret = -ENOMEM;
1371                         goto err;
1372                 }
1373
1374                 slots = page_pool->slots;
1375                 page_pool->frag_page = devm_kcalloc(dev, slots,
1376                                                     sizeof(struct page *),
1377                                                     GFP_KERNEL);
1378                 if (!page_pool->frag_page) {
1379                         ret = -ENOMEM;
1380                         goto err;
1381                 }
1382
1383                 page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
1384                                                         sizeof(dma_addr_t),
1385                                                         GFP_KERNEL);
1386                 if (!page_pool->frag_dma_addr) {
1387                         ret = -ENOMEM;
1388                         goto err;
1389                 }
1390
1391                 page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
1392                 rx_ring->page_pool = page_pool;
1393         }
1394
1395         for (i = 0; i < pdata->txq_cnt; i++) {
1396                 /* allocate tx descriptor ring */
1397                 owner = xgene_derive_ring_owner(pdata);
1398                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1399                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1400                                                       RING_CFGSIZE_16KB,
1401                                                       ring_id);
1402                 if (!tx_ring) {
1403                         ret = -ENOMEM;
1404                         goto err;
1405                 }
1406
1407                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1408                 exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1409                                                GFP_KERNEL | __GFP_ZERO);
1410                 if (!exp_bufs) {
1411                         ret = -ENOMEM;
1412                         goto err;
1413                 }
1414                 tx_ring->exp_bufs = exp_bufs;
1415
1416                 pdata->tx_ring[i] = tx_ring;
1417
1418                 if (!pdata->cq_cnt) {
1419                         cp_ring = pdata->rx_ring[i];
1420                 } else {
1421                         /* allocate tx completion descriptor ring */
1422                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1423                                                          cpu_bufnum++);
1424                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1425                                                               RING_CFGSIZE_16KB,
1426                                                               ring_id);
1427                         if (!cp_ring) {
1428                                 ret = -ENOMEM;
1429                                 goto err;
1430                         }
1431
1432                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1433                         cp_ring->index = i;
1434                 }
1435
1436                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1437                                                sizeof(struct sk_buff *),
1438                                                GFP_KERNEL);
1439                 if (!cp_ring->cp_skb) {
1440                         ret = -ENOMEM;
1441                         goto err;
1442                 }
1443
1444                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1445                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1446                                                       size, GFP_KERNEL);
1447                 if (!cp_ring->frag_dma_addr) {
1448                         devm_kfree(dev, cp_ring->cp_skb);
1449                         ret = -ENOMEM;
1450                         goto err;
1451                 }
1452
1453                 tx_ring->cp_ring = cp_ring;
1454                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1455         }
1456
1457         if (pdata->ring_ops->coalesce)
1458                 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1459         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1460
1461         return 0;
1462
1463 err:
1464         xgene_enet_free_desc_rings(pdata);
1465         return ret;
1466 }
1467
1468 static void xgene_enet_get_stats64(
1469                         struct net_device *ndev,
1470                         struct rtnl_link_stats64 *stats)
1471 {
1472         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1473         struct xgene_enet_desc_ring *ring;
1474         int i;
1475
1476         for (i = 0; i < pdata->txq_cnt; i++) {
1477                 ring = pdata->tx_ring[i];
1478                 if (ring) {
1479                         stats->tx_packets += ring->tx_packets;
1480                         stats->tx_bytes += ring->tx_bytes;
1481                         stats->tx_dropped += ring->tx_dropped;
1482                         stats->tx_errors += ring->tx_errors;
1483                 }
1484         }
1485
1486         for (i = 0; i < pdata->rxq_cnt; i++) {
1487                 ring = pdata->rx_ring[i];
1488                 if (ring) {
1489                         stats->rx_packets += ring->rx_packets;
1490                         stats->rx_bytes += ring->rx_bytes;
1491                         stats->rx_dropped += ring->rx_dropped;
1492                         stats->rx_errors += ring->rx_errors +
1493                                 ring->rx_length_errors +
1494                                 ring->rx_crc_errors +
1495                                 ring->rx_frame_errors +
1496                                 ring->rx_fifo_errors;
1497                         stats->rx_length_errors += ring->rx_length_errors;
1498                         stats->rx_crc_errors += ring->rx_crc_errors;
1499                         stats->rx_frame_errors += ring->rx_frame_errors;
1500                         stats->rx_fifo_errors += ring->rx_fifo_errors;
1501                 }
1502         }
1503 }
1504
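/* Let eth_mac_addr() validate and store the new address, then program
 * it into the MAC through the mode-specific callback.
 */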
1505 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1506 {
1507         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1508         int ret;
1509
1510         ret = eth_mac_addr(ndev, addr);
1511         if (ret)
1512                 return ret;
1513         pdata->mac_ops->set_mac_addr(pdata);
1514
1515         return ret;
1516 }
1517
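/* The MAC frame size can only be reprogrammed safely while the
 * interface is quiesced: close it, update the MTU and frame size, then
 * reopen it.
 */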
1518 static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
1519 {
1520         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1521         int frame_size;
1522
1523         if (!netif_running(ndev))
1524                 return 0;
1525
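        /* Frame size is the MTU plus the Ethernet header (14 bytes) and
         * FCS (4 bytes); 0x600 (1536) covers the standard 1500-byte MTU.
         */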
1526         frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
1527
1528         xgene_enet_close(ndev);
1529         ndev->mtu = new_mtu;
1530         pdata->mac_ops->set_framesize(pdata, frame_size);
1531         xgene_enet_open(ndev);
1532
1533         return 0;
1534 }
1535
1536 static const struct net_device_ops xgene_ndev_ops = {
1537         .ndo_open = xgene_enet_open,
1538         .ndo_stop = xgene_enet_close,
1539         .ndo_start_xmit = xgene_enet_start_xmit,
1540         .ndo_tx_timeout = xgene_enet_timeout,
1541         .ndo_get_stats64 = xgene_enet_get_stats64,
1542         .ndo_change_mtu = xgene_change_mtu,
1543         .ndo_set_mac_address = xgene_enet_set_mac_address,
1544 };
1545
1546 #ifdef CONFIG_ACPI
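/* On ACPI systems the port index comes from the _SUN ("slot user
 * number") object; default to port 0 when it is absent.
 */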
1547 static void xgene_get_port_id_acpi(struct device *dev,
1548                                    struct xgene_enet_pdata *pdata)
1549 {
1550         acpi_status status;
1551         u64 temp;
1552
1553         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1554         if (ACPI_FAILURE(status))
1555                 pdata->port_id = 0;
1556         else
1557                 pdata->port_id = temp;
1558 }
1562 #endif
1563
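/* On DT systems the optional "port-id" property selects the port; the
 * BIT(0) mask clamps it to 0 or 1, the only ports handled here.
 */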
1564 static void xgene_get_port_id_dt(struct device *dev,
1565                                  struct xgene_enet_pdata *pdata)
1566 {
1567         u32 id = 0;
1568
1569         of_property_read_u32(dev->of_node, "port-id", &id);
1570
1571         pdata->port_id = id & BIT(0);
1572 }
1574
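/* Optional RGMII delay tuning: "tx-delay" (and "rx-delay" below) must
 * lie in 0-7 and default to 4 and 2 respectively when unspecified.
 */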
1575 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1576 {
1577         struct device *dev = &pdata->pdev->dev;
1578         int delay, ret;
1579
1580         ret = device_property_read_u32(dev, "tx-delay", &delay);
1581         if (ret) {
1582                 pdata->tx_delay = 4;
1583                 return 0;
1584         }
1585
1586         if (delay < 0 || delay > 7) {
1587                 dev_err(dev, "Invalid tx-delay specified\n");
1588                 return -EINVAL;
1589         }
1590
1591         pdata->tx_delay = delay;
1592
1593         return 0;
1594 }
1595
1596 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1597 {
1598         struct device *dev = &pdata->pdev->dev;
1599         int delay, ret;
1600
1601         ret = device_property_read_u32(dev, "rx-delay", &delay);
1602         if (ret) {
1603                 pdata->rx_delay = 2;
1604                 return 0;
1605         }
1606
1607         if (delay < 0 || delay > 7) {
1608                 dev_err(dev, "Invalid rx-delay specified\n");
1609                 return -EINVAL;
1610         }
1611
1612         pdata->rx_delay = delay;
1613
1614         return 0;
1615 }
1616
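/* RGMII ports need a single IRQ and SGMII a second one for Tx
 * completions, while XGMII requests up to XGENE_MAX_ENET_IRQ vectors.
 * An XGMII port tolerates a short allocation by shrinking its Rx, Tx
 * and completion queue counts to half the IRQs actually found.
 */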
1617 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1618 {
1619         struct platform_device *pdev = pdata->pdev;
1620         int i, ret, max_irqs;
1621
1622         if (phy_interface_mode_is_rgmii(pdata->phy_mode))
1623                 max_irqs = 1;
1624         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1625                 max_irqs = 2;
1626         else
1627                 max_irqs = XGENE_MAX_ENET_IRQ;
1628
1629         for (i = 0; i < max_irqs; i++) {
1630                 ret = platform_get_irq(pdev, i);
1631                 if (ret <= 0) {
1632                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1633                                 max_irqs = i;
1634                                 pdata->rxq_cnt = max_irqs / 2;
1635                                 pdata->txq_cnt = max_irqs / 2;
1636                                 pdata->cq_cnt = max_irqs / 2;
1637                                 break;
1638                         }
1639                         return ret ? : -ENXIO;
1640                 }
1641                 pdata->irqs[i] = ret;
1642         }
1643
1644         return 0;
1645 }
1646
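/* Probe for an MDIO-attached PHY on the 1G modes; a successful connect
 * marks the port as MDIO-driven so the teardown paths disconnect it.
 */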
1647 static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1648 {
1649         int ret;
1650
1651         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1652                 return;
1653
1654         if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1655                 return;
1656
1657         ret = xgene_enet_phy_connect(pdata->ndev);
1658         if (!ret)
1659                 pdata->mdio_driver = true;
1660 }
1661
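/* XGMII ports may describe SFP cage GPIOs; prefer the "rxlos"
 * descriptor and fall back to "sfp" when it is unavailable.
 */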
1662 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1663 {
1664         struct device *dev = &pdata->pdev->dev;
1665
1666         pdata->sfp_gpio_en = false;
1667         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
1668             (!device_property_present(dev, "sfp-gpios") &&
1669              !device_property_present(dev, "rxlos-gpios")))
1670                 return;
1671
1672         pdata->sfp_gpio_en = true;
1673         pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1674         if (IS_ERR(pdata->sfp_rdy))
1675                 pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1676 }
1677
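/* Collect everything probe needs from DT or ACPI: the three CSR
 * windows, port id, MAC address, PHY mode, RGMII delays, IRQs and
 * clock, then derive the per-block register bases from the port's CSR
 * window.
 */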
1678 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1679 {
1680         struct platform_device *pdev;
1681         struct net_device *ndev;
1682         struct device *dev;
1683         struct resource *res;
1684         void __iomem *base_addr;
1685         u32 offset;
1686         int ret = 0;
1687
1688         pdev = pdata->pdev;
1689         dev = &pdev->dev;
1690         ndev = pdata->ndev;
1691
1692         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1693         if (!res) {
1694                 dev_err(dev, "Resource enet_csr not defined\n");
1695                 return -ENODEV;
1696         }
1697         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1698         if (!pdata->base_addr) {
1699                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1700                 return -ENOMEM;
1701         }
1702
1703         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1704         if (!res) {
1705                 dev_err(dev, "Resource ring_csr not defined\n");
1706                 return -ENODEV;
1707         }
1708         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1709                                              resource_size(res));
1710         if (!pdata->ring_csr_addr) {
1711                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1712                 return -ENOMEM;
1713         }
1714
1715         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1716         if (!res) {
1717                 dev_err(dev, "Resource ring_cmd not defined\n");
1718                 return -ENODEV;
1719         }
1720         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1721                                              resource_size(res));
1722         if (!pdata->ring_cmd_addr) {
1723                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1724                 return -ENOMEM;
1725         }
1726
1727         if (dev->of_node)
1728                 xgene_get_port_id_dt(dev, pdata);
1729 #ifdef CONFIG_ACPI
1730         else
1731                 xgene_get_port_id_acpi(dev, pdata);
1732 #endif
1733
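        /* The MAC address helpers return 0 or -errno; any error means
         * DT/ACPI supplied no usable address, so fall back to a random one.
         */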
1734         if (device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1735                 eth_hw_addr_random(ndev);
1736
1737         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1738
1739         pdata->phy_mode = device_get_phy_mode(dev);
1740         if (pdata->phy_mode < 0) {
1741                 dev_err(dev, "Unable to get phy-connection-type\n");
1742                 return pdata->phy_mode;
1743         }
1744         if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
1745             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1746             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1747                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1748                 return -ENODEV;
1749         }
1750
1751         ret = xgene_get_tx_delay(pdata);
1752         if (ret)
1753                 return ret;
1754
1755         ret = xgene_get_rx_delay(pdata);
1756         if (ret)
1757                 return ret;
1758
1759         ret = xgene_enet_get_irqs(pdata);
1760         if (ret)
1761                 return ret;
1762
1763         xgene_enet_gpiod_get(pdata);
1764
1765         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1766         if (IS_ERR(pdata->clk)) {
1767                 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1768                         /* Abort if the clock is defined but couldn't be
1769                          * retrieved. Always abort if the clock is missing
1770                          * on a DT system, as the driver can't handle it.
1771                          */
1772                         if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1773                                 return PTR_ERR(pdata->clk);
1774                         /* Firmware may have set up the clock already. */
1775                         dev_info(dev, "clocks have already been set up\n");
1776                 }
1777         }
1778
1779         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1780                 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1781         else
1782                 base_addr = pdata->base_addr;
1783         pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1784         pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1785         pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1786         pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1787         if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
1788             pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1789                 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1790                 pdata->mcx_stats_addr =
1791                         pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
1792                 offset = (pdata->enet_id == XGENE_ENET1) ?
1793                           BLOCK_ETH_MAC_CSR_OFFSET :
1794                           X2_BLOCK_ETH_MAC_CSR_OFFSET;
1795                 pdata->mcx_mac_csr_addr = base_addr + offset;
1796         } else {
1797                 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1798                 pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
1799                 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1800                 pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1801         }
1802         pdata->rx_buff_cnt = NUM_PKT_BUF;
1803
1804         return 0;
1805 }
1806
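/* Bring the port up to an operational state: reset it, create the
 * descriptor rings, seed the Rx buffer and page pools, and steer Rx
 * traffic through either the preclassifier tree (XGMII) or the CLE
 * bypass path (1G modes).
 */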
1807 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1808 {
1809         struct xgene_enet_cle *enet_cle = &pdata->cle;
1810         struct xgene_enet_desc_ring *page_pool;
1811         struct net_device *ndev = pdata->ndev;
1812         struct xgene_enet_desc_ring *buf_pool;
1813         u16 dst_ring_num, ring_id;
1814         int i, ret;
1815         u32 count;
1816
1817         ret = pdata->port_ops->reset(pdata);
1818         if (ret)
1819                 return ret;
1820
1821         ret = xgene_enet_create_desc_rings(ndev);
1822         if (ret) {
1823                 netdev_err(ndev, "Error in ring configuration\n");
1824                 return ret;
1825         }
1826
1827         /* Set up and prefill the Rx buffer and page pools */
1828         for (i = 0; i < pdata->rxq_cnt; i++) {
1829                 buf_pool = pdata->rx_ring[i]->buf_pool;
1830                 xgene_enet_init_bufpool(buf_pool);
1831                 page_pool = pdata->rx_ring[i]->page_pool;
1832                 xgene_enet_init_bufpool(page_pool);
1833
1834                 count = pdata->rx_buff_cnt;
1835                 ret = xgene_enet_refill_bufpool(buf_pool, count);
1836                 if (ret)
1837                         goto err;
1838
1839                 ret = xgene_enet_refill_pagepool(page_pool, count);
1840                 if (ret)
1841                         goto err;
1843         }
1844
1845         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1846         buf_pool = pdata->rx_ring[0]->buf_pool;
1847         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1848                 /* Initialize and enable the preclassifier tree */
1849                 enet_cle->max_nodes = 512;
1850                 enet_cle->max_dbptrs = 1024;
1851                 enet_cle->parsers = 3;
1852                 enet_cle->active_parser = PARSER_ALL;
1853                 enet_cle->ptree.start_node = 0;
1854                 enet_cle->ptree.start_dbptr = 0;
1855                 enet_cle->jump_bytes = 8;
1856                 ret = pdata->cle_ops->cle_init(pdata);
1857                 if (ret) {
1858                         netdev_err(ndev, "Preclass Tree init error\n");
1859                         goto err;
1860                 }
1862         } else {
1865                 page_pool = pdata->rx_ring[0]->page_pool;
1866                 ring_id = (page_pool) ? page_pool->id : 0;
1867                 pdata->port_ops->cle_bypass(pdata, dst_ring_num,
1868                                             buf_pool->id, ring_id);
1869         }
1870
1871         ndev->max_mtu = XGENE_ENET_MAX_MTU;
1872         pdata->phy_speed = SPEED_UNKNOWN;
1873         pdata->mac_ops->init(pdata);
1874
1875         return 0;
1876
1877 err:
1878         xgene_enet_delete_desc_rings(pdata);
1879         return ret;
1880 }
1881
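/* Bind the MAC, port and ring operations plus default queue counts for
 * the detected PHY mode, then pick the buffer and ring number bases
 * matching this ENET instance and port.
 */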
1882 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1883 {
1884         switch (pdata->phy_mode) {
1885         case PHY_INTERFACE_MODE_RGMII:
1886         case PHY_INTERFACE_MODE_RGMII_ID:
1887         case PHY_INTERFACE_MODE_RGMII_RXID:
1888         case PHY_INTERFACE_MODE_RGMII_TXID:
1889                 pdata->mac_ops = &xgene_gmac_ops;
1890                 pdata->port_ops = &xgene_gport_ops;
1891                 pdata->rm = RM3;
1892                 pdata->rxq_cnt = 1;
1893                 pdata->txq_cnt = 1;
1894                 pdata->cq_cnt = 0;
1895                 break;
1896         case PHY_INTERFACE_MODE_SGMII:
1897                 pdata->mac_ops = &xgene_sgmac_ops;
1898                 pdata->port_ops = &xgene_sgport_ops;
1899                 pdata->rm = RM1;
1900                 pdata->rxq_cnt = 1;
1901                 pdata->txq_cnt = 1;
1902                 pdata->cq_cnt = 1;
1903                 break;
1904         default:
1905                 pdata->mac_ops = &xgene_xgmac_ops;
1906                 pdata->port_ops = &xgene_xgport_ops;
1907                 pdata->cle_ops = &xgene_cle3in_ops;
1908                 pdata->rm = RM0;
1909                 if (!pdata->rxq_cnt) {
1910                         pdata->rxq_cnt = XGENE_NUM_RX_RING;
1911                         pdata->txq_cnt = XGENE_NUM_TX_RING;
1912                         pdata->cq_cnt = XGENE_NUM_TXC_RING;
1913                 }
1914                 break;
1915         }
1916
1917         if (pdata->enet_id == XGENE_ENET1) {
1918                 switch (pdata->port_id) {
1919                 case 0:
1920                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1921                                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1922                                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1923                                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1924                                 pdata->ring_num = START_RING_NUM_0;
1925                         } else {
1926                                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1927                                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1928                                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1929                                 pdata->ring_num = START_RING_NUM_0;
1930                         }
1931                         break;
1932                 case 1:
1933                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1934                                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1935                                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1936                                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1937                                 pdata->ring_num = XG_START_RING_NUM_1;
1938                         } else {
1939                                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1940                                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1941                                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1942                                 pdata->ring_num = START_RING_NUM_1;
1943                         }
1944                         break;
1945                 default:
1946                         break;
1947                 }
1948                 pdata->ring_ops = &xgene_ring1_ops;
1949         } else {
1950                 switch (pdata->port_id) {
1951                 case 0:
1952                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1953                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1954                         pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1955                         pdata->ring_num = X2_START_RING_NUM_0;
1956                         break;
1957                 case 1:
1958                         pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1959                         pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1960                         pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1961                         pdata->ring_num = X2_START_RING_NUM_1;
1962                         break;
1963                 default:
1964                         break;
1965                 }
1966                 pdata->rm = RM0;
1967                 pdata->ring_ops = &xgene_ring2_ops;
1968         }
1969 }
1970
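/* Register one NAPI context per Rx ring and one per Tx-completion
 * ring; both are polled by xgene_enet_napi().
 */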
1971 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1972 {
1973         struct napi_struct *napi;
1974         int i;
1975
1976         for (i = 0; i < pdata->rxq_cnt; i++) {
1977                 napi = &pdata->rx_ring[i]->napi;
1978                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1979                                NAPI_POLL_WEIGHT);
1980         }
1981
1982         for (i = 0; i < pdata->cq_cnt; i++) {
1983                 napi = &pdata->tx_ring[i]->cp_ring->napi;
1984                 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1985                                NAPI_POLL_WEIGHT);
1986         }
1987 }
1988
1989 #ifdef CONFIG_ACPI
1990 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1991         { "APMC0D05", XGENE_ENET1},
1992         { "APMC0D30", XGENE_ENET1},
1993         { "APMC0D31", XGENE_ENET1},
1994         { "APMC0D3F", XGENE_ENET1},
1995         { "APMC0D26", XGENE_ENET2},
1996         { "APMC0D25", XGENE_ENET2},
1997         { }
1998 };
1999 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
2000 #endif
2001
2002 static const struct of_device_id xgene_enet_of_match[] = {
2003         {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
2004         {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
2005         {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
2006         {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
2007         {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
2008         {},
2009 };
2010
2011 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
2012
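/* Standard platform probe: allocate a multi-queue netdev, identify the
 * ENET generation from the DT or ACPI match table, map resources, set
 * up the hardware and finally register the interface.
 */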
2013 static int xgene_enet_probe(struct platform_device *pdev)
2014 {
2015         struct net_device *ndev;
2016         struct xgene_enet_pdata *pdata;
2017         struct device *dev = &pdev->dev;
2018         void (*link_state)(struct work_struct *);
2019         const struct of_device_id *of_id;
2020         int ret;
2021
2022         ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2023                                   XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
2024         if (!ndev)
2025                 return -ENOMEM;
2026
2027         pdata = netdev_priv(ndev);
2028
2029         pdata->pdev = pdev;
2030         pdata->ndev = ndev;
2031         SET_NETDEV_DEV(ndev, dev);
2032         platform_set_drvdata(pdev, pdata);
2033         ndev->netdev_ops = &xgene_ndev_ops;
2034         xgene_enet_set_ethtool_ops(ndev);
2035         ndev->features |= NETIF_F_IP_CSUM |
2036                           NETIF_F_GSO |
2037                           NETIF_F_GRO |
2038                           NETIF_F_SG;
2039
2040         of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
2041         if (of_id) {
2042                 pdata->enet_id = (enum xgene_enet_id)of_id->data;
2043         }
2044 #ifdef CONFIG_ACPI
2045         else {
2046                 const struct acpi_device_id *acpi_id;
2047
2048                 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
2049                 if (acpi_id)
2050                         pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
2051         }
2052 #endif
2053         if (!pdata->enet_id) {
2054                 ret = -ENODEV;
2055                 goto err;
2056         }
2057
2058         ret = xgene_enet_get_resources(pdata);
2059         if (ret)
2060                 goto err;
2061
2062         xgene_enet_setup_ops(pdata);
2063         spin_lock_init(&pdata->mac_lock);
2064
2065         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2066                 ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
2067                 spin_lock_init(&pdata->mss_lock);
2068         }
2069         ndev->hw_features = ndev->features;
2070
2071         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
2072         if (ret) {
2073                 netdev_err(ndev, "No usable DMA configuration\n");
2074                 goto err;
2075         }
2076
2077         xgene_enet_check_phy_handle(pdata);
2078
2079         ret = xgene_enet_init_hw(pdata);
2080         if (ret)
2081                 goto err2;
2082
2083         link_state = pdata->mac_ops->link_state;
2084         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2085                 INIT_DELAYED_WORK(&pdata->link_work, link_state);
2086         } else if (!pdata->mdio_driver) {
2087                 if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2088                         ret = xgene_enet_mdio_config(pdata);
2089                 else
2090                         INIT_DELAYED_WORK(&pdata->link_work, link_state);
2091
2092                 if (ret)
2093                         goto err1;
2094         }
2095
2096         spin_lock_init(&pdata->stats_lock);
2097         ret = xgene_extd_stats_init(pdata);
2098         if (ret)
2099                 goto err1;
2100
2101         xgene_enet_napi_add(pdata);
2102         ret = register_netdev(ndev);
2103         if (ret) {
2104                 netdev_err(ndev, "Failed to register netdev\n");
2105                 goto err1;
2106         }
2107
2108         return 0;
2109
2110 err1:
2111         /*
2112          * If necessary, free_netdev() will call netif_napi_del() and undo
2113          * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
2114          */
2115
2116         xgene_enet_delete_desc_rings(pdata);
2117
2118 err2:
2119         if (pdata->mdio_driver)
2120                 xgene_enet_phy_disconnect(pdata);
2121         else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2122                 xgene_enet_mdio_remove(pdata);
2123 err:
2124         free_netdev(ndev);
2125         return ret;
2126 }
2127
2128 static int xgene_enet_remove(struct platform_device *pdev)
2129 {
2130         struct xgene_enet_pdata *pdata;
2131         struct net_device *ndev;
2132
2133         pdata = platform_get_drvdata(pdev);
2134         ndev = pdata->ndev;
2135
2136         rtnl_lock();
2137         if (netif_running(ndev))
2138                 dev_close(ndev);
2139         rtnl_unlock();
2140
2141         if (pdata->mdio_driver)
2142                 xgene_enet_phy_disconnect(pdata);
2143         else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2144                 xgene_enet_mdio_remove(pdata);
2145
2146         unregister_netdev(ndev);
2147         xgene_enet_delete_desc_rings(pdata);
2148         pdata->port_ops->shutdown(pdata);
2149         free_netdev(ndev);
2150
2151         return 0;
2152 }
2153
2154 static void xgene_enet_shutdown(struct platform_device *pdev)
2155 {
2156         struct xgene_enet_pdata *pdata;
2157
2158         pdata = platform_get_drvdata(pdev);
2159         if (!pdata || !pdata->ndev)
2160                 return;
2164
2165         xgene_enet_remove(pdev);
2166 }
2167
2168 static struct platform_driver xgene_enet_driver = {
2169         .driver = {
2170                    .name = "xgene-enet",
2171                    .of_match_table = of_match_ptr(xgene_enet_of_match),
2172                    .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
2173         },
2174         .probe = xgene_enet_probe,
2175         .remove = xgene_enet_remove,
2176         .shutdown = xgene_enet_shutdown,
2177 };
2178
2179 module_platform_driver(xgene_enet_driver);
2180
2181 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
2182 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
2183 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
2184 MODULE_LICENSE("GPL");