drivers: net: xgene: fix statistics counters race condition
[linux-2.6-block.git] / drivers / net / ethernet / apm / xgene / xgene_enet_main.c

/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
					   SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
					   SET_VAL(STASH, 3));
	}
}

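/*
 * Refill the buffer pool with nbuf newly allocated, DMA-mapped skbs.
 * The 16-byte descriptors are staged first; the single ring command
 * write at the end then tells the hardware how many fresh buffers it
 * now owns.
 */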
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

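/*
 * RX/TX-completion interrupt: hand the ring to NAPI and mask the line.
 * The IRQ stays disabled for the whole poll and is re-enabled from
 * xgene_enet_napi() once the poll finishes under budget.
 */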
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

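/*
 * Build the 64-bit "work message" (hopinfo) for a TX descriptor: header
 * lengths, checksum-offload enables and, for TSO, the ET bit.  Returns 0
 * only when an oversized header cannot be linearized, in which case the
 * caller drops the packet.
 */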
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the complete header to reside
				 * within the first three buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

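/*
 * Queue one skb on the TX ring.  A linear skb takes a single descriptor;
 * a scatter-gather skb also consumes an expanded descriptor (exp_desc)
 * holding up to four buffer pointers, and fragments beyond that (or the
 * 16KB splits of large fragments) spill into a per-ring link-list area
 * (exp_bufs) referenced from the expanded descriptor.  Returns the
 * number of ring slots used, or a negative errno.
 */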
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

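/*
 * tx_level counts descriptors queued and txc_level counts descriptors
 * completed, both per completion-ring index.  The transmit path stops
 * the subqueue once their difference exceeds tx_qcnt_hi; the explicit
 * adjustment below compensates for unsigned wraparound of tx_level.
 */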
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

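/*
 * Drain up to budget messages from a ring carrying both RX frames and
 * TX completions.  An NV-flagged message spans two slots (the second is
 * the expanded descriptor), so head may advance by two per message; all
 * consumed slots are handed back to the hardware with one negative ring
 * command write at the end.
 */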
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

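/*
 * IRQ_DISABLE_UNLAZY makes disable_irq_nosync() mask the line at the
 * interrupt controller immediately rather than lazily on the next
 * interrupt; that matters here because xgene_enet_rx_irq() disables its
 * own line for the duration of the NAPI poll.
 */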
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

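/*
 * Allocate one descriptor ring: coherent DMA memory for the descriptors
 * plus, for CPU-owned rings on second-generation (XGENE_ENET2) hardware,
 * a small coherent interrupt-mailbox buffer of INTR_MBOX_SIZE bytes (see
 * is_irq_mbox_required() above).
 */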
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
					&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

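/*
 * Build the full ring layout: a 16KB RX ring plus a 2KB buffer-pool ring
 * per RX queue, and a 16KB TX ring per TX queue.  TX completions either
 * get a dedicated completion ring (when cq_cnt is non-zero) or are
 * multiplexed onto the matching RX ring.
 */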
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int i, ret, size;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		if (!pdata->cq_cnt) {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
				 ndev->name, i);
		}
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
							&dma_exp_bufs,
							GFP_KERNEL);
		if (!tx_ring->exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
			snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
				 ndev->name, i);
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

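/*
 * The per-ring tx_packets/tx_bytes and rx_* counters are summed here on
 * demand.  Keeping one set of counters per ring, each with a single
 * writer, instead of updating a shared rtnl_link_stats64 from every
 * queue, is the statistics race fix named in the commit subject.
 */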
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

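/*
 * Fetch the platform IRQs: one shared RX/TXC interrupt for RGMII, two
 * for SGMII, and up to XGENE_MAX_ENET_IRQ for XGMII.  If an XGMII port
 * exposes fewer interrupts than that, the RX/TX/completion queue counts
 * are scaled down to half the IRQs actually found.
 */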
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret) {
			xgene_enet_delete_desc_rings(pdata);
			return ret;
		}
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			return ret;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->mac_ops->init(pdata);

	return ret;
}

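/*
 * Bind the MAC, port, classifier and ring callbacks plus the queue
 * counts for the detected PHY mode, then pick the per-port buffer-number
 * and ring-number bases, which differ per port and between ENET1 and
 * ENET2 silicon.
 */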
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_del(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	const struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err_netdev;

	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		ret = xgene_enet_mdio_config(pdata);
		if (ret)
			goto err_netdev;
	} else {
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
	}

	xgene_enet_napi_add(pdata);
	return 0;
err_netdev:
	unregister_netdev(ndev);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	const struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");