Commit | Line | Data |
---|---|---|
e6ad7673 IS |
1 | /* Applied Micro X-Gene SoC Ethernet Driver |
2 | * | |
3 | * Copyright (c) 2014, Applied Micro Circuits Corporation | |
4 | * Authors: Iyappan Subramanian <isubramanian@apm.com> | |
5 | * Ravi Patel <rapatel@apm.com> | |
6 | * Keyur Chudgar <kchudgar@apm.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the | |
10 | * Free Software Foundation; either version 2 of the License, or (at your | |
11 | * option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
22 | #include "xgene_enet_main.h" | |
23 | #include "xgene_enet_hw.h" | |
32f784b5 | 24 | #include "xgene_enet_sgmac.h" |
0148d38d | 25 | #include "xgene_enet_xgmac.h" |
e6ad7673 | 26 | |
de7b5b3d FK |
27 | #define RES_ENET_CSR 0 |
28 | #define RES_RING_CSR 1 | |
29 | #define RES_RING_CMD 2 | |
30 | ||
e6ad7673 IS |
31 | static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) |
32 | { | |
33 | struct xgene_enet_raw_desc16 *raw_desc; | |
34 | int i; | |
35 | ||
36 | for (i = 0; i < buf_pool->slots; i++) { | |
37 | raw_desc = &buf_pool->raw_desc16[i]; | |
38 | ||
39 | /* Hardware expects descriptor in little endian format */ | |
40 | raw_desc->m0 = cpu_to_le64(i | | |
41 | SET_VAL(FPQNUM, buf_pool->dst_ring_num) | | |
42 | SET_VAL(STASH, 3)); | |
43 | } | |
44 | } | |
45 | ||
46 | static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, | |
47 | u32 nbuf) | |
48 | { | |
49 | struct sk_buff *skb; | |
50 | struct xgene_enet_raw_desc16 *raw_desc; | |
81cefb81 | 51 | struct xgene_enet_pdata *pdata; |
e6ad7673 IS |
52 | struct net_device *ndev; |
53 | struct device *dev; | |
54 | dma_addr_t dma_addr; | |
55 | u32 tail = buf_pool->tail; | |
56 | u32 slots = buf_pool->slots - 1; | |
57 | u16 bufdatalen, len; | |
58 | int i; | |
59 | ||
60 | ndev = buf_pool->ndev; | |
61 | dev = ndev_to_dev(buf_pool->ndev); | |
81cefb81 | 62 | pdata = netdev_priv(ndev); |
e6ad7673 IS |
63 | bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); |
64 | len = XGENE_ENET_MAX_MTU; | |
65 | ||
66 | for (i = 0; i < nbuf; i++) { | |
67 | raw_desc = &buf_pool->raw_desc16[tail]; | |
68 | ||
69 | skb = netdev_alloc_skb_ip_align(ndev, len); | |
70 | if (unlikely(!skb)) | |
71 | return -ENOMEM; | |
72 | buf_pool->rx_skb[tail] = skb; | |
73 | ||
74 | dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); | |
75 | if (dma_mapping_error(dev, dma_addr)) { | |
76 | netdev_err(ndev, "DMA mapping error\n"); | |
77 | dev_kfree_skb_any(skb); | |
78 | return -EINVAL; | |
79 | } | |
80 | ||
81 | raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | | |
82 | SET_VAL(BUFDATALEN, bufdatalen) | | |
83 | SET_BIT(COHERENT)); | |
84 | tail = (tail + 1) & slots; | |
85 | } | |
86 | ||
81cefb81 | 87 | pdata->ring_ops->wr_cmd(buf_pool, nbuf); |
e6ad7673 IS |
88 | buf_pool->tail = tail; |
89 | ||
90 | return 0; | |
91 | } | |
92 | ||
93 | static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) | |
94 | { | |
95 | struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); | |
96 | ||
97 | return ((u16)pdata->rm << 10) | ring->num; | |
98 | } | |
99 | ||
100 | static u8 xgene_enet_hdr_len(const void *data) | |
101 | { | |
102 | const struct ethhdr *eth = data; | |
103 | ||
104 | return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN; | |
105 | } | |
106 | ||
e6ad7673 IS |
/* Reclaim every buffer still held by the pool: walk backwards from the
 * tail over the occupied descriptors, free the skb each one records,
 * then retire all slots with a single negative ring command.
 */
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;	/* mask; assumes slots is a power of two */
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		/* USERINFO was preloaded with the slot index in init_bufpool */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	/* A negative count decrements the ring's occupancy counter */
	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}
129 | ||
130 | static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) | |
131 | { | |
132 | struct xgene_enet_desc_ring *rx_ring = data; | |
133 | ||
134 | if (napi_schedule_prep(&rx_ring->napi)) { | |
135 | disable_irq_nosync(irq); | |
136 | __napi_schedule(&rx_ring->napi); | |
137 | } | |
138 | ||
139 | return IRQ_HANDLED; | |
140 | } | |
141 | ||
/* Process one Tx-completion descriptor: unmap the frame's DMA buffer,
 * report any hardware error code and free the transmitted skb.
 *
 * Returns 0 on success, -EIO on a hardware error or a missing skb.
 */
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	u16 skb_index;
	u8 status;
	int ret = 0;

	/* USERINFO holds the Tx slot index recorded when the frame was queued */
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
			 DMA_TO_DEVICE);

	/* Checking for error: only LERR codes above 2 are reported as errors */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}
176 | ||
177 | static u64 xgene_enet_work_msg(struct sk_buff *skb) | |
178 | { | |
179 | struct iphdr *iph; | |
180 | u8 l3hlen, l4hlen = 0; | |
181 | u8 csum_enable = 0; | |
182 | u8 proto = 0; | |
183 | u8 ethhdr; | |
184 | u64 hopinfo; | |
185 | ||
186 | if (unlikely(skb->protocol != htons(ETH_P_IP)) && | |
187 | unlikely(skb->protocol != htons(ETH_P_8021Q))) | |
188 | goto out; | |
189 | ||
190 | if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) | |
191 | goto out; | |
192 | ||
193 | iph = ip_hdr(skb); | |
194 | if (unlikely(ip_is_fragment(iph))) | |
195 | goto out; | |
196 | ||
197 | if (likely(iph->protocol == IPPROTO_TCP)) { | |
198 | l4hlen = tcp_hdrlen(skb) >> 2; | |
199 | csum_enable = 1; | |
200 | proto = TSO_IPPROTO_TCP; | |
201 | } else if (iph->protocol == IPPROTO_UDP) { | |
202 | l4hlen = UDP_HDR_SIZE; | |
203 | csum_enable = 1; | |
204 | } | |
205 | out: | |
206 | l3hlen = ip_hdrlen(skb) >> 2; | |
207 | ethhdr = xgene_enet_hdr_len(skb->data); | |
208 | hopinfo = SET_VAL(TCPHDR, l4hlen) | | |
209 | SET_VAL(IPHDR, l3hlen) | | |
210 | SET_VAL(ETHHDR, ethhdr) | | |
211 | SET_VAL(EC, csum_enable) | | |
212 | SET_VAL(IS, proto) | | |
213 | SET_BIT(IC) | | |
214 | SET_BIT(TYPE_ETH_WORK_MESSAGE); | |
215 | ||
216 | return hopinfo; | |
217 | } | |
218 | ||
/* Fill the next Tx descriptor for @skb: map the linear data for DMA,
 * encode address/length/work-message fields, and remember the skb for
 * the completion handler.
 *
 * Returns 0 on success or -EINVAL if DMA mapping fails (the caller
 * then owns and frees the skb).
 */
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u16 tail = tx_ring->tail;
	u64 hopinfo;

	raw_desc = &tx_ring->raw_desc[tail];
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	/* only the linear part is mapped; this path handles unfragmented skbs */
	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	/* m0 carries the slot index so the completion can find the skb */
	raw_desc->m0 = cpu_to_le64(tail);
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, skb->len) |
				   SET_BIT(COHERENT));
	hopinfo = xgene_enet_work_msg(skb);
	/* HENQNUM routes the completion message to the paired cp_ring */
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);
	tx_ring->cp_ring->cp_skb[tail] = skb;

	return 0;
}
249 | ||
/* ndo_start_xmit: queue one frame on the Tx ring.  Backpressures the
 * stack when either the Tx ring or its completion ring crosses the
 * configured high watermark.
 */
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		/* the queue is woken again from the completion path */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
		/* mapping failed: drop the frame but report OK so the
		 * stack does not keep retrying it
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* hand one descriptor to the hardware */
	pdata->ring_ops->wr_cmd(tx_ring, 1);
	skb_tx_timestamp(skb);
	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}
280 | ||
281 | static void xgene_enet_skip_csum(struct sk_buff *skb) | |
282 | { | |
283 | struct iphdr *iph = ip_hdr(skb); | |
284 | ||
285 | if (!ip_is_fragment(iph) || | |
286 | (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { | |
287 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
288 | } | |
289 | } | |
290 | ||
/* Receive one frame described by @raw_desc: unmap its pool buffer,
 * drop errored frames, strip the trailing CRC and hand good frames to
 * GRO.  Refills the buffer pool once every NUM_BUFPOOL frames.
 *
 * Returns 0 on success or a negative errno (which also ends the NAPI
 * processing loop early).
 */
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	/* buffers were mapped at full MTU size in the refill path */
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	/* USERINFO carries the pool slot this skb was parked in */
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error: only LERR codes above 2 count as errors */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen -= 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	/* top up the free pool once every NUM_BUFPOOL frames */
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
348 | ||
349 | static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) | |
350 | { | |
351 | return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false; | |
352 | } | |
353 | ||
/* Drain up to @budget descriptors from @ring, dispatching each one to
 * the Rx or Tx-completion handler, then retire the processed slots and
 * wake the Tx queue if it was stopped and pressure has eased.
 *
 * Returns the number of descriptors processed.  A budget of -1 (used
 * on the close path) effectively means "drain everything".
 */
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);

		head = (head + 1) & slots;
		count++;

		/* stop early on the first handler error */
		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		/* a negative count decrements the ring occupancy counter */
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return count;
}
395 | ||
396 | static int xgene_enet_napi(struct napi_struct *napi, const int budget) | |
397 | { | |
398 | struct xgene_enet_desc_ring *ring; | |
399 | int processed; | |
400 | ||
401 | ring = container_of(napi, struct xgene_enet_desc_ring, napi); | |
402 | processed = xgene_enet_process_ring(ring, budget); | |
403 | ||
404 | if (processed != budget) { | |
405 | napi_complete(napi); | |
406 | enable_irq(ring->irq); | |
407 | } | |
408 | ||
409 | return processed; | |
410 | } | |
411 | ||
412 | static void xgene_enet_timeout(struct net_device *ndev) | |
413 | { | |
414 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | |
415 | ||
d0eb7458 | 416 | pdata->mac_ops->reset(pdata); |
e6ad7673 IS |
417 | } |
418 | ||
419 | static int xgene_enet_register_irq(struct net_device *ndev) | |
420 | { | |
421 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | |
422 | struct device *dev = ndev_to_dev(ndev); | |
6772b653 | 423 | struct xgene_enet_desc_ring *ring; |
e6ad7673 IS |
424 | int ret; |
425 | ||
6772b653 IS |
426 | ring = pdata->rx_ring; |
427 | ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, | |
428 | IRQF_SHARED, ring->irq_name, ring); | |
429 | if (ret) | |
430 | netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); | |
431 | ||
432 | if (pdata->cq_cnt) { | |
433 | ring = pdata->tx_ring->cp_ring; | |
434 | ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, | |
435 | IRQF_SHARED, ring->irq_name, ring); | |
436 | if (ret) { | |
437 | netdev_err(ndev, "Failed to request irq %s\n", | |
438 | ring->irq_name); | |
439 | } | |
e6ad7673 IS |
440 | } |
441 | ||
442 | return ret; | |
443 | } | |
444 | ||
445 | static void xgene_enet_free_irq(struct net_device *ndev) | |
446 | { | |
447 | struct xgene_enet_pdata *pdata; | |
448 | struct device *dev; | |
449 | ||
450 | pdata = netdev_priv(ndev); | |
451 | dev = ndev_to_dev(ndev); | |
452 | devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); | |
6772b653 IS |
453 | |
454 | if (pdata->cq_cnt) { | |
455 | devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, | |
456 | pdata->tx_ring->cp_ring); | |
457 | } | |
458 | } | |
459 | ||
460 | static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) | |
461 | { | |
462 | struct napi_struct *napi; | |
463 | ||
464 | napi = &pdata->rx_ring->napi; | |
465 | napi_enable(napi); | |
466 | ||
467 | if (pdata->cq_cnt) { | |
468 | napi = &pdata->tx_ring->cp_ring->napi; | |
469 | napi_enable(napi); | |
470 | } | |
471 | } | |
472 | ||
473 | static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) | |
474 | { | |
475 | struct napi_struct *napi; | |
476 | ||
477 | napi = &pdata->rx_ring->napi; | |
478 | napi_disable(napi); | |
479 | ||
480 | if (pdata->cq_cnt) { | |
481 | napi = &pdata->tx_ring->cp_ring->napi; | |
482 | napi_disable(napi); | |
483 | } | |
e6ad7673 IS |
484 | } |
485 | ||
/* ndo_open: enable the MAC datapath, wire up interrupts and NAPI, and
 * start link management before opening the Tx queue.
 */
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	/* RGMII ports have a phylib-managed PHY; other modes poll the
	 * link state via the delayed link_work instead
	 */
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	/* start with carrier off; link handling raises it once up */
	netif_carrier_off(ndev);
	netif_start_queue(ndev);

	return ret;
}
510 | ||
/* ndo_stop: reverse of xgene_enet_open() — stop the queue and link
 * management, tear down NAPI and interrupts, drain the Rx ring, then
 * disable the MAC datapath.
 */
static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	/* budget of -1: drain everything still pending on the rx ring */
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}
532 | ||
533 | static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) | |
534 | { | |
535 | struct xgene_enet_pdata *pdata; | |
536 | struct device *dev; | |
537 | ||
538 | pdata = netdev_priv(ring->ndev); | |
539 | dev = ndev_to_dev(ring->ndev); | |
540 | ||
81cefb81 | 541 | pdata->ring_ops->clear(ring); |
e6ad7673 IS |
542 | dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); |
543 | } | |
544 | ||
545 | static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) | |
546 | { | |
547 | struct xgene_enet_desc_ring *buf_pool; | |
548 | ||
549 | if (pdata->tx_ring) { | |
550 | xgene_enet_delete_ring(pdata->tx_ring); | |
551 | pdata->tx_ring = NULL; | |
552 | } | |
553 | ||
554 | if (pdata->rx_ring) { | |
555 | buf_pool = pdata->rx_ring->buf_pool; | |
556 | xgene_enet_delete_bufpool(buf_pool); | |
557 | xgene_enet_delete_ring(buf_pool); | |
558 | xgene_enet_delete_ring(pdata->rx_ring); | |
559 | pdata->rx_ring = NULL; | |
560 | } | |
561 | } | |
562 | ||
563 | static int xgene_enet_get_ring_size(struct device *dev, | |
564 | enum xgene_enet_ring_cfgsize cfgsize) | |
565 | { | |
566 | int size = -EINVAL; | |
567 | ||
568 | switch (cfgsize) { | |
569 | case RING_CFGSIZE_512B: | |
570 | size = 0x200; | |
571 | break; | |
572 | case RING_CFGSIZE_2KB: | |
573 | size = 0x800; | |
574 | break; | |
575 | case RING_CFGSIZE_16KB: | |
576 | size = 0x4000; | |
577 | break; | |
578 | case RING_CFGSIZE_64KB: | |
579 | size = 0x10000; | |
580 | break; | |
581 | case RING_CFGSIZE_512KB: | |
582 | size = 0x80000; | |
583 | break; | |
584 | default: | |
585 | dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); | |
586 | break; | |
587 | } | |
588 | ||
589 | return size; | |
590 | } | |
591 | ||
592 | static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) | |
593 | { | |
81cefb81 | 594 | struct xgene_enet_pdata *pdata; |
e6ad7673 IS |
595 | struct device *dev; |
596 | ||
597 | if (!ring) | |
598 | return; | |
599 | ||
600 | dev = ndev_to_dev(ring->ndev); | |
81cefb81 | 601 | pdata = netdev_priv(ring->ndev); |
e6ad7673 IS |
602 | |
603 | if (ring->desc_addr) { | |
81cefb81 | 604 | pdata->ring_ops->clear(ring); |
e6ad7673 IS |
605 | dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); |
606 | } | |
607 | devm_kfree(dev, ring); | |
608 | } | |
609 | ||
/* Free every descriptor ring and per-slot skb array.  Called from the
 * error path of ring creation, so any pointer may be NULL or only
 * partially set up and is checked before use.
 */
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		/* cp_ring is a distinct ring only when cq_cnt is set;
		 * otherwise it aliases the rx_ring freed below
		 */
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}
634 | ||
/* Allocate one descriptor ring: the bookkeeping struct, its coherent
 * DMA descriptor area and the per-ring command-register window, then
 * hand it to the ring_ops backend for hardware setup.
 *
 * Returns the initialised ring, or NULL on any failure.
 */
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	/* each ring owns a 64-byte window of command registers */
	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	/* NOTE(review): setup()'s return value is dereferenced without a
	 * NULL check — confirm the backend cannot fail here
	 */
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}
674 | ||
675 | static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) | |
676 | { | |
677 | return (owner << 6) | (bufnum & GENMASK(5, 0)); | |
678 | } | |
679 | ||
/* Create the full ring complement for one port: Rx ring plus its
 * buffer pool, Tx ring, and a Tx-completion ring (the Rx ring doubles
 * as the completion ring when cq_cnt is zero).  On any failure every
 * partially-created ring is torn down again.
 *
 * Returns 0 on success or a negative errno.
 */
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int ret;

	/* allocate rx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	/* without a separate completion queue the rx interrupt also
	 * signals tx completions, so name it accordingly
	 */
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		/* tx completions are delivered on the rx ring */
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	/* flow-control watermarks used by start_xmit/process_ring */
	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}
776 | ||
777 | static struct rtnl_link_stats64 *xgene_enet_get_stats64( | |
778 | struct net_device *ndev, | |
779 | struct rtnl_link_stats64 *storage) | |
780 | { | |
781 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | |
782 | struct rtnl_link_stats64 *stats = &pdata->stats; | |
783 | ||
784 | stats->rx_errors += stats->rx_length_errors + | |
785 | stats->rx_crc_errors + | |
786 | stats->rx_frame_errors + | |
787 | stats->rx_fifo_errors; | |
788 | memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); | |
789 | ||
790 | return storage; | |
791 | } | |
792 | ||
793 | static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) | |
794 | { | |
795 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | |
796 | int ret; | |
797 | ||
798 | ret = eth_mac_addr(ndev, addr); | |
799 | if (ret) | |
800 | return ret; | |
d0eb7458 | 801 | pdata->mac_ops->set_mac_addr(pdata); |
e6ad7673 IS |
802 | |
803 | return ret; | |
804 | } | |
805 | ||
/* net_device callbacks wired into the network stack for X-Gene ports */
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};
815 | ||
ca626454 KC |
816 | static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata) |
817 | { | |
818 | u32 id = 0; | |
819 | int ret; | |
820 | ||
821 | ret = device_property_read_u32(dev, "port-id", &id); | |
822 | if (!ret && id > 1) { | |
823 | dev_err(dev, "Incorrect port-id specified\n"); | |
824 | return -ENODEV; | |
825 | } | |
826 | ||
827 | pdata->port_id = id; | |
828 | ||
829 | return 0; | |
830 | } | |
831 | ||
de7b5b3d FK |
832 | static int xgene_get_mac_address(struct device *dev, |
833 | unsigned char *addr) | |
834 | { | |
835 | int ret; | |
836 | ||
837 | ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6); | |
838 | if (ret) | |
839 | ret = device_property_read_u8_array(dev, "mac-address", | |
840 | addr, 6); | |
841 | if (ret) | |
842 | return -ENODEV; | |
843 | ||
844 | return ETH_ALEN; | |
845 | } | |
846 | ||
847 | static int xgene_get_phy_mode(struct device *dev) | |
848 | { | |
849 | int i, ret; | |
850 | char *modestr; | |
851 | ||
852 | ret = device_property_read_string(dev, "phy-connection-type", | |
853 | (const char **)&modestr); | |
854 | if (ret) | |
855 | ret = device_property_read_string(dev, "phy-mode", | |
856 | (const char **)&modestr); | |
857 | if (ret) | |
858 | return -ENODEV; | |
859 | ||
860 | for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { | |
861 | if (!strcasecmp(modestr, phy_modes(i))) | |
862 | return i; | |
863 | } | |
864 | return -ENODEV; | |
865 | } | |
866 | ||
e6ad7673 IS |
867 | static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) |
868 | { | |
869 | struct platform_device *pdev; | |
870 | struct net_device *ndev; | |
871 | struct device *dev; | |
872 | struct resource *res; | |
873 | void __iomem *base_addr; | |
e6ad7673 IS |
874 | int ret; |
875 | ||
876 | pdev = pdata->pdev; | |
877 | dev = &pdev->dev; | |
878 | ndev = pdata->ndev; | |
879 | ||
de7b5b3d FK |
880 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); |
881 | if (!res) { | |
882 | dev_err(dev, "Resource enet_csr not defined\n"); | |
883 | return -ENODEV; | |
884 | } | |
885 | pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); | |
3ec7a176 | 886 | if (!pdata->base_addr) { |
e6ad7673 | 887 | dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); |
3ec7a176 | 888 | return -ENOMEM; |
e6ad7673 IS |
889 | } |
890 | ||
de7b5b3d FK |
891 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); |
892 | if (!res) { | |
893 | dev_err(dev, "Resource ring_csr not defined\n"); | |
894 | return -ENODEV; | |
895 | } | |
896 | pdata->ring_csr_addr = devm_ioremap(dev, res->start, | |
897 | resource_size(res)); | |
3ec7a176 | 898 | if (!pdata->ring_csr_addr) { |
e6ad7673 | 899 | dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); |
3ec7a176 | 900 | return -ENOMEM; |
e6ad7673 IS |
901 | } |
902 | ||
de7b5b3d FK |
903 | res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); |
904 | if (!res) { | |
905 | dev_err(dev, "Resource ring_cmd not defined\n"); | |
906 | return -ENODEV; | |
907 | } | |
908 | pdata->ring_cmd_addr = devm_ioremap(dev, res->start, | |
909 | resource_size(res)); | |
3ec7a176 | 910 | if (!pdata->ring_cmd_addr) { |
e6ad7673 | 911 | dev_err(dev, "Unable to retrieve ENET Ring command region\n"); |
3ec7a176 | 912 | return -ENOMEM; |
e6ad7673 IS |
913 | } |
914 | ||
ca626454 KC |
915 | ret = xgene_get_port_id(dev, pdata); |
916 | if (ret) | |
917 | return ret; | |
918 | ||
de7b5b3d | 919 | if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) |
e6ad7673 | 920 | eth_hw_addr_random(ndev); |
de7b5b3d | 921 | |
e6ad7673 IS |
922 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); |
923 | ||
de7b5b3d | 924 | pdata->phy_mode = xgene_get_phy_mode(dev); |
e6ad7673 | 925 | if (pdata->phy_mode < 0) { |
0148d38d IS |
926 | dev_err(dev, "Unable to get phy-connection-type\n"); |
927 | return pdata->phy_mode; | |
928 | } | |
929 | if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && | |
32f784b5 | 930 | pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && |
0148d38d IS |
931 | pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { |
932 | dev_err(dev, "Incorrect phy-connection-type specified\n"); | |
933 | return -ENODEV; | |
e6ad7673 IS |
934 | } |
935 | ||
6772b653 IS |
936 | ret = platform_get_irq(pdev, 0); |
937 | if (ret <= 0) { | |
938 | dev_err(dev, "Unable to get ENET Rx IRQ\n"); | |
939 | ret = ret ? : -ENXIO; | |
940 | return ret; | |
941 | } | |
942 | pdata->rx_irq = ret; | |
943 | ||
944 | if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { | |
945 | ret = platform_get_irq(pdev, 1); | |
946 | if (ret <= 0) { | |
947 | dev_err(dev, "Unable to get ENET Tx completion IRQ\n"); | |
948 | ret = ret ? : -ENXIO; | |
949 | return ret; | |
950 | } | |
951 | pdata->txc_irq = ret; | |
952 | } | |
953 | ||
e6ad7673 | 954 | pdata->clk = devm_clk_get(&pdev->dev, NULL); |
e6ad7673 | 955 | if (IS_ERR(pdata->clk)) { |
de7b5b3d FK |
956 | /* Firmware may have set up the clock already. */ |
957 | pdata->clk = NULL; | |
e6ad7673 IS |
958 | } |
959 | ||
ca626454 | 960 | base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); |
e6ad7673 IS |
961 | pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; |
962 | pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; | |
963 | pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; | |
32f784b5 IS |
964 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || |
965 | pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { | |
ca626454 | 966 | pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; |
0148d38d | 967 | pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; |
0148d38d IS |
968 | } else { |
969 | pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; | |
970 | pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; | |
0148d38d | 971 | } |
e6ad7673 IS |
972 | pdata->rx_buff_cnt = NUM_PKT_BUF; |
973 | ||
0148d38d | 974 | return 0; |
e6ad7673 IS |
975 | } |
976 | ||
977 | static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) | |
978 | { | |
979 | struct net_device *ndev = pdata->ndev; | |
980 | struct xgene_enet_desc_ring *buf_pool; | |
981 | u16 dst_ring_num; | |
982 | int ret; | |
983 | ||
c3f4465d IS |
984 | ret = pdata->port_ops->reset(pdata); |
985 | if (ret) | |
986 | return ret; | |
e6ad7673 IS |
987 | |
988 | ret = xgene_enet_create_desc_rings(ndev); | |
989 | if (ret) { | |
990 | netdev_err(ndev, "Error in ring configuration\n"); | |
991 | return ret; | |
992 | } | |
993 | ||
994 | /* setup buffer pool */ | |
995 | buf_pool = pdata->rx_ring->buf_pool; | |
996 | xgene_enet_init_bufpool(buf_pool); | |
997 | ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); | |
998 | if (ret) { | |
999 | xgene_enet_delete_desc_rings(pdata); | |
1000 | return ret; | |
1001 | } | |
1002 | ||
1003 | dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); | |
d0eb7458 | 1004 | pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); |
0148d38d | 1005 | pdata->mac_ops->init(pdata); |
e6ad7673 IS |
1006 | |
1007 | return ret; | |
1008 | } | |
1009 | ||
d0eb7458 IS |
1010 | static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) |
1011 | { | |
0148d38d IS |
1012 | switch (pdata->phy_mode) { |
1013 | case PHY_INTERFACE_MODE_RGMII: | |
1014 | pdata->mac_ops = &xgene_gmac_ops; | |
1015 | pdata->port_ops = &xgene_gport_ops; | |
dc8385f0 | 1016 | pdata->rm = RM3; |
0148d38d | 1017 | break; |
32f784b5 IS |
1018 | case PHY_INTERFACE_MODE_SGMII: |
1019 | pdata->mac_ops = &xgene_sgmac_ops; | |
1020 | pdata->port_ops = &xgene_sgport_ops; | |
1021 | pdata->rm = RM1; | |
6772b653 | 1022 | pdata->cq_cnt = XGENE_MAX_TXC_RINGS; |
32f784b5 | 1023 | break; |
0148d38d IS |
1024 | default: |
1025 | pdata->mac_ops = &xgene_xgmac_ops; | |
1026 | pdata->port_ops = &xgene_xgport_ops; | |
dc8385f0 | 1027 | pdata->rm = RM0; |
6772b653 | 1028 | pdata->cq_cnt = XGENE_MAX_TXC_RINGS; |
0148d38d IS |
1029 | break; |
1030 | } | |
ca626454 KC |
1031 | |
1032 | switch (pdata->port_id) { | |
1033 | case 0: | |
1034 | pdata->cpu_bufnum = START_CPU_BUFNUM_0; | |
1035 | pdata->eth_bufnum = START_ETH_BUFNUM_0; | |
1036 | pdata->bp_bufnum = START_BP_BUFNUM_0; | |
1037 | pdata->ring_num = START_RING_NUM_0; | |
1038 | break; | |
1039 | case 1: | |
1040 | pdata->cpu_bufnum = START_CPU_BUFNUM_1; | |
1041 | pdata->eth_bufnum = START_ETH_BUFNUM_1; | |
1042 | pdata->bp_bufnum = START_BP_BUFNUM_1; | |
1043 | pdata->ring_num = START_RING_NUM_1; | |
1044 | break; | |
1045 | default: | |
1046 | break; | |
1047 | } | |
1048 | ||
81cefb81 | 1049 | pdata->ring_ops = &xgene_ring1_ops; |
d0eb7458 IS |
1050 | } |
1051 | ||
6772b653 IS |
1052 | static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) |
1053 | { | |
1054 | struct napi_struct *napi; | |
1055 | ||
1056 | napi = &pdata->rx_ring->napi; | |
1057 | netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); | |
1058 | ||
1059 | if (pdata->cq_cnt) { | |
1060 | napi = &pdata->tx_ring->cp_ring->napi; | |
1061 | netif_napi_add(pdata->ndev, napi, xgene_enet_napi, | |
1062 | NAPI_POLL_WEIGHT); | |
1063 | } | |
1064 | } | |
1065 | ||
1066 | static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) | |
1067 | { | |
1068 | struct napi_struct *napi; | |
1069 | ||
1070 | napi = &pdata->rx_ring->napi; | |
1071 | netif_napi_del(napi); | |
1072 | ||
1073 | if (pdata->cq_cnt) { | |
1074 | napi = &pdata->tx_ring->cp_ring->napi; | |
1075 | netif_napi_del(napi); | |
1076 | } | |
1077 | } | |
1078 | ||
e6ad7673 IS |
1079 | static int xgene_enet_probe(struct platform_device *pdev) |
1080 | { | |
1081 | struct net_device *ndev; | |
1082 | struct xgene_enet_pdata *pdata; | |
1083 | struct device *dev = &pdev->dev; | |
dc8385f0 | 1084 | struct xgene_mac_ops *mac_ops; |
e6ad7673 IS |
1085 | int ret; |
1086 | ||
1087 | ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); | |
1088 | if (!ndev) | |
1089 | return -ENOMEM; | |
1090 | ||
1091 | pdata = netdev_priv(ndev); | |
1092 | ||
1093 | pdata->pdev = pdev; | |
1094 | pdata->ndev = ndev; | |
1095 | SET_NETDEV_DEV(ndev, dev); | |
1096 | platform_set_drvdata(pdev, pdata); | |
1097 | ndev->netdev_ops = &xgene_ndev_ops; | |
1098 | xgene_enet_set_ethtool_ops(ndev); | |
1099 | ndev->features |= NETIF_F_IP_CSUM | | |
1100 | NETIF_F_GSO | | |
1101 | NETIF_F_GRO; | |
1102 | ||
1103 | ret = xgene_enet_get_resources(pdata); | |
1104 | if (ret) | |
1105 | goto err; | |
1106 | ||
d0eb7458 | 1107 | xgene_enet_setup_ops(pdata); |
e6ad7673 IS |
1108 | |
1109 | ret = register_netdev(ndev); | |
1110 | if (ret) { | |
1111 | netdev_err(ndev, "Failed to register netdev\n"); | |
1112 | goto err; | |
1113 | } | |
1114 | ||
de7b5b3d | 1115 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
e6ad7673 IS |
1116 | if (ret) { |
1117 | netdev_err(ndev, "No usable DMA configuration\n"); | |
1118 | goto err; | |
1119 | } | |
1120 | ||
1121 | ret = xgene_enet_init_hw(pdata); | |
1122 | if (ret) | |
1123 | goto err; | |
1124 | ||
6772b653 | 1125 | xgene_enet_napi_add(pdata); |
dc8385f0 | 1126 | mac_ops = pdata->mac_ops; |
0148d38d IS |
1127 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) |
1128 | ret = xgene_enet_mdio_config(pdata); | |
1129 | else | |
dc8385f0 | 1130 | INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); |
e6ad7673 IS |
1131 | |
1132 | return ret; | |
1133 | err: | |
c3f4465d | 1134 | unregister_netdev(ndev); |
e6ad7673 IS |
1135 | free_netdev(ndev); |
1136 | return ret; | |
1137 | } | |
1138 | ||
1139 | static int xgene_enet_remove(struct platform_device *pdev) | |
1140 | { | |
1141 | struct xgene_enet_pdata *pdata; | |
d0eb7458 | 1142 | struct xgene_mac_ops *mac_ops; |
e6ad7673 IS |
1143 | struct net_device *ndev; |
1144 | ||
1145 | pdata = platform_get_drvdata(pdev); | |
d0eb7458 | 1146 | mac_ops = pdata->mac_ops; |
e6ad7673 IS |
1147 | ndev = pdata->ndev; |
1148 | ||
d0eb7458 IS |
1149 | mac_ops->rx_disable(pdata); |
1150 | mac_ops->tx_disable(pdata); | |
e6ad7673 | 1151 | |
6772b653 | 1152 | xgene_enet_napi_del(pdata); |
e6ad7673 IS |
1153 | xgene_enet_mdio_remove(pdata); |
1154 | xgene_enet_delete_desc_rings(pdata); | |
1155 | unregister_netdev(ndev); | |
d0eb7458 | 1156 | pdata->port_ops->shutdown(pdata); |
e6ad7673 IS |
1157 | free_netdev(ndev); |
1158 | ||
1159 | return 0; | |
1160 | } | |
1161 | ||
de7b5b3d FK |
#ifdef CONFIG_ACPI
/* ACPI _HID values this driver binds to; presumably one per ENET port
 * flavour (RGMII/SGMII/XFI) — verify against the platform firmware.
 */
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", },
	{ "APMC0D30", },
	{ "APMC0D31", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif
1171 | ||
#ifdef CONFIG_OF
/* Device-tree compatible strings this driver binds to */
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",},
	{.compatible = "apm,xgene1-sgenet",},
	{.compatible = "apm,xgene1-xgenet",},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif
e6ad7673 IS |
1182 | |
/* Platform driver glue: the device is matched via either the DT or the
 * ACPI table above (each compiled in only when the respective config
 * option is enabled, hence of_match_ptr()/ACPI_PTR()).
 */
static struct platform_driver xgene_enet_driver = {
	.driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");