/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

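/* Seed every 16-byte descriptor in the buffer pool with its own slot
 * index (returned later in the USERINFO field to locate the matching
 * rx_skb), the destination free-pool queue number and a stash hint.
 */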
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

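/* Allocate @nbuf fresh SKBs, DMA-map them for receive and publish one
 * buffer-pool descriptor per SKB; the ring is only advanced via
 * ring_ops->wr_cmd() once all descriptors have been written.
 */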
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

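/* Reclaim a transmitted SKB: unmap the linear part and every fragment
 * recorded in frag_dma_addr[], decode the LERR status field and free
 * the SKB saved at transmit time under the descriptor's USERINFO index.
 */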
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}

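/* Build the 64-bit work message (hopinfo) that tells the hardware how
 * to treat this frame: Ethernet/IP/TCP header lengths, whether to
 * insert L4 checksums, and, when TSO applies, the ET bit requesting
 * segmentation.  Returning 0 signals that the SKB could not be
 * linearized and must be dropped by the caller.
 */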
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the headers to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

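/* Fill hardware descriptors for one SKB.  The linear part goes in the
 * primary descriptor; up to four more buffers fit in the extension
 * descriptor, and anything beyond that spills into an "ell" (extended
 * link list) buffer area.  Fragments larger than 16KB are split, since
 * a single buffer pointer can carry at most BUFLEN_16K bytes.
 */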
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

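/* tx_level and txc_level are free-running counters of descriptors
 * queued and completed per ring; the wraparound compensation below
 * keeps the subtraction correct, and the subqueue is stopped once more
 * than tx_qcnt_hi descriptors are outstanding.
 */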
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

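/* Drain up to @budget descriptors from a ring that carries both Rx
 * frames and Tx completions.  A set NV bit means the message continues
 * in the next slot (the extension descriptor), so that slot is
 * consumed as well before the element is dispatched.
 */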
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_start_queue(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

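/* Allocate one descriptor ring: the descriptor memory itself comes
 * from dma_zalloc_coherent(), and on second-generation hardware
 * (XGENE_ENET2) CPU-owned rings additionally get an interrupt mailbox
 * buffer.
 */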
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

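/* Create the whole ring set: one Rx ring plus buffer pool per Rx queue
 * and one Tx ring per Tx queue.  When dedicated completion rings are
 * configured (cq_cnt != 0) each Tx ring gets its own cp_ring; otherwise
 * Tx completions are delivered through the matching Rx ring.
 */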
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int i, ret, size;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		if (!pdata->cq_cnt) {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
				 ndev->name, i);
		}
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
							&dma_exp_bufs,
							GFP_KERNEL);
		if (!tx_ring->exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
			snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
				 ndev->name, i);
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status))
		pdata->port_id = 0;
	else
		pdata->port_id = temp;
}
#endif

static void xgene_get_port_id_dt(struct device *dev,
				 struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

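/* The number of IRQs to claim follows the PHY mode: one shared rx/txc
 * interrupt for RGMII, two for SGMII, and the full rx + txc set for
 * XGMII (XGENE_MAX_ENET_IRQ).
 */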
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret) {
			xgene_enet_delete_desc_rings(pdata);
			return ret;
		}
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			return ret;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->mac_ops->init(pdata);

	return ret;
}

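/* Bind the MAC, port and ring operations plus queue counts to the PHY
 * mode, then derive the buffer/ring numbering bases from the ENET
 * generation and port id.
 */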
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		pdata->rxq_cnt = XGENE_NUM_RX_RING;
		pdata->txq_cnt = XGENE_NUM_TX_RING;
		pdata->cq_cnt = XGENE_NUM_TXC_RING;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->rm = RM0;
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_del(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_del(napi);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	const struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		ret = xgene_enet_mdio_config(pdata);
		if (ret)
			goto err;
	} else {
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
	}

	xgene_enet_napi_add(pdata);
	return 0;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	const struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");