/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

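/* Allocate skbs for the free pool and publish them to the hardware:
 * each 16-byte descriptor carries the DMA address of a freshly mapped
 * receive buffer, and a single ring command hands all nbuf descriptors
 * to the ethernet block at once.
 */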
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

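/* Reclaim a transmitted skb: the USERINFO field of the completion
 * descriptor indexes the cp_skb[] and frag_dma_addr[] arrays that were
 * filled at transmit time, so the linear head and every fragment can be
 * unmapped before the skb is freed.
 */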
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

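/* Build the 64-bit work message (hopinfo) that tells the hardware how to
 * handle the frame: ethernet/IP/TCP header lengths, checksum offload
 * enables and, when TSO is active, the ET bit that enables segmentation.
 * Returns 0 only if a too-scattered TSO header forced a linearize that
 * failed, which aborts the transmit.
 */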
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first three buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

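/* Fill transmit descriptors for one skb. A linear skb takes a single
 * descriptor; a nonlinear skb also consumes the next slot as an expanded
 * descriptor holding up to four buffer pointers. Fragments beyond that,
 * and any fragment larger than 16K (which is split), spill into the
 * per-ring exp_bufs area that gets linked in through the LL fields.
 * Returns the number of ring slots consumed, or a negative errno.
 */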
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

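/* tx_level counts descriptors posted and txc_level descriptors completed;
 * both are free-running 32-bit counters, so the wraparound adjustment
 * below keeps their difference (the in-flight count) correct. The
 * subqueue is stopped once that difference exceeds tx_qcnt_hi.
 */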
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

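/* Deliver one received frame to the stack: unmap the buffer named by the
 * descriptor, trim the CRC that the hardware leaves in place, and pass
 * the skb to GRO. The buffer pool is refilled once every NUM_BUFPOOL
 * frames rather than per packet.
 */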
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error: combine the ELERR and LERR bitfields with a
	 * bitwise OR (a logical || would reduce status to 0 or 1 and
	 * defeat the check below)
	 */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

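/* Service a ring from NAPI context. Rx and tx-completion messages may
 * share a queue: a non-zero FPQNUM marks a receive descriptor, and the
 * NV bit announces a second (expanded) slot that must be consumed
 * together with the first.
 */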
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		phy_start(pdata->phy_dev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

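/* Allocate one descriptor ring: coherent DMA memory for the descriptors,
 * an interrupt mailbox for CPU-owned rings on second generation hardware,
 * and the doorbell (command) registers derived from the ring number.
 */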
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
					&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

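/* Build the ring topology: a 16KB rx descriptor ring plus a 2KB buffer
 * pool per rx queue, and a 16KB tx ring per tx queue. With dedicated
 * completion rings (cq_cnt != 0) each tx ring gets its own CPU-owned
 * completion ring; otherwise completions arrive on the matching rx ring.
 */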
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int i, ret, size;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		if (!pdata->cq_cnt) {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
				 ndev->name, i);
		}
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
							&dma_exp_bufs,
							GFP_KERNEL);
		if (!tx_ring->exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
			snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
				 ndev->name, i);
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

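/* RGMII ports use a single IRQ shared by rx and tx completions, SGMII
 * ports use two, and XFI ports up to XGENE_MAX_ENET_IRQ. An XFI port
 * exposing fewer IRQs than the maximum has its queue counts scaled down
 * to match instead of failing the probe.
 */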
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret) {
			xgene_enet_delete_desc_rings(pdata);
			return ret;
		}
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			return ret;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;
}

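/* Bind the MAC, port and ring callbacks plus the default queue counts to
 * the PHY interface mode, then carve the buffer and ring number spaces
 * up between the ports of each silicon generation.
 */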
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_del(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_del(napi);
	}
}

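/* Probe: identify the silicon generation from the OF/ACPI match data,
 * map resources, bind the per-mode callbacks, register the netdev and
 * only then initialize the hardware; RGMII ports get an MDIO bus, the
 * rest a polled link-state work item.
 */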
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	const struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err_netdev;

	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		ret = xgene_enet_mdio_config(pdata);
		if (ret)
			goto err_netdev;
	} else {
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
	}

	xgene_enet_napi_add(pdata);
	return 0;
err_netdev:
	unregister_netdev(ndev);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	const struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");