// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl SoCs Ethernet MAC driver
 *
 * Copyright (c) 2012 Actions Semi Inc.
 * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
 */

#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reset.h>

#include "owl-emac.h"

#define OWL_EMAC_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
					 NETIF_MSG_PROBE | \
					 NETIF_MSG_LINK)

static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg)
{
	return readl(priv->base + reg);
}

static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data)
{
	writel(data, priv->base + reg);
}

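/*
 * Read-modify-write helper: replace the bits selected by @mask with @val
 * and return the previous value of the masked field. Callers such as
 * owl_emac_dma_cmd_stop()/owl_emac_dma_cmd_set() rely on the returned
 * value to restore the prior DMA state afterwards.
 */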
static u32 owl_emac_reg_update(struct owl_emac_priv *priv,
			       u32 reg, u32 mask, u32 val)
{
	u32 data, old_val;

	data = owl_emac_reg_read(priv, reg);
	old_val = data & mask;

	data &= ~mask;
	data |= val & mask;

	owl_emac_reg_write(priv, reg, data);

	return old_val;
}

static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits)
{
	owl_emac_reg_update(priv, reg, bits, bits);
}

static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits)
{
	owl_emac_reg_update(priv, reg, bits, 0);
}

static struct device *owl_emac_get_dev(struct owl_emac_priv *priv)
{
	return priv->netdev->dev.parent;
}

static void owl_emac_irq_enable(struct owl_emac_priv *priv)
{
	/* Enable all interrupts except TU.
	 *
	 * Note the NIE and AIE bits must also be set in order to actually
	 * enable the selected interrupts.
	 */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
			   OWL_EMAC_BIT_MAC_CSR7_NIE |
			   OWL_EMAC_BIT_MAC_CSR7_AIE |
			   OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
}

static void owl_emac_irq_disable(struct owl_emac_priv *priv)
{
	/* Disable all interrupts.
	 *
	 * WARNING: Unset only the NIE and AIE bits in CSR7 to work around an
	 * unexpected side effect (MAC hardware bug?!) where some bits in the
	 * status register (CSR5) are cleared automatically before being able
	 * to read them via owl_emac_irq_clear().
	 */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
			   OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
}

static u32 owl_emac_irq_status(struct owl_emac_priv *priv)
{
	return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5);
}

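/*
 * CSR5 appears to be write-one-to-clear: writing back the value just read
 * acknowledges exactly the interrupt bits that were set at that moment.
 */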
static u32 owl_emac_irq_clear(struct owl_emac_priv *priv)
{
	u32 val = owl_emac_irq_status(priv);

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val);

	return val;
}

static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = owl_emac_get_dev(priv);

	/* Buffer pointer for the RX DMA descriptor must be word aligned. */
	return dma_map_single(dev, skb_tail_pointer(skb),
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}

static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv,
				  struct sk_buff *skb, dma_addr_t dma_addr)
{
	struct device *dev = owl_emac_get_dev(priv);

	dma_unmap_single(dev, dma_addr, skb_tailroom(skb), DMA_FROM_DEVICE);
}

static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = owl_emac_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}

static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv,
				  struct sk_buff *skb, dma_addr_t dma_addr)
{
	struct device *dev = owl_emac_get_dev(priv);

	dma_unmap_single(dev, dma_addr, skb_headlen(skb), DMA_TO_DEVICE);
}

static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring)
{
	return CIRC_SPACE(ring->head, ring->tail, ring->size);
}

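/*
 * Ring indices wrap via "& (size - 1)", so ring->size must be a power of
 * two; the OWL_EMAC_RX/TX_RING_SIZE constants are expected to satisfy this.
 */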
static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring,
					   unsigned int cur)
{
	return (cur + 1) & (ring->size - 1);
}

static void owl_emac_ring_push_head(struct owl_emac_ring *ring)
{
	ring->head = owl_emac_ring_get_next(ring, ring->head);
}

static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring)
{
	ring->tail = owl_emac_ring_get_next(ring, ring->tail);
}

static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev)
{
	struct sk_buff *skb;
	int offset;

	skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN +
			       OWL_EMAC_SKB_RESERVE);
	if (unlikely(!skb))
		return NULL;

	/* Ensure 4-byte DMA alignment. */
	offset = ((uintptr_t)skb->data) & (OWL_EMAC_SKB_ALIGN - 1);
	if (unlikely(offset))
		skb_reserve(skb, OWL_EMAC_SKB_ALIGN - offset);

	return skb;
}

static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	struct device *dev = owl_emac_get_dev(priv);
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < ring->size; i++) {
		skb = owl_emac_alloc_skb(netdev);
		if (!skb)
			return -ENOMEM;

		dma_addr = owl_emac_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		desc = &ring->descs[i];
		desc->status = OWL_EMAC_BIT_RDES0_OWN;
		desc->control = skb_tailroom(skb) & OWL_EMAC_MSK_RDES1_RBS1;
		desc->buf_addr = dma_addr;
		desc->reserved = 0;

		ring->skbs[i] = skb;
		ring->skbs_dma[i] = dma_addr;
	}

	desc->control |= OWL_EMAC_BIT_RDES1_RER;

	ring->head = 0;
	ring->tail = 0;

	return 0;
}

static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct owl_emac_ring_desc *desc;
	int i;

	for (i = 0; i < ring->size; i++) {
		desc = &ring->descs[i];

		desc->status = 0;
		desc->control = OWL_EMAC_BIT_TDES1_IC;
		desc->buf_addr = 0;
		desc->reserved = 0;
	}

	desc->control |= OWL_EMAC_BIT_TDES1_TER;

	memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size);

	ring->head = 0;
	ring->tail = 0;
}

static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		ring->descs[i].status = 0;

		if (!ring->skbs_dma[i])
			continue;

		owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]);
		ring->skbs_dma[i] = 0;

		dev_kfree_skb(ring->skbs[i]);
		ring->skbs[i] = NULL;
	}
}

static void owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		ring->descs[i].status = 0;

		if (!ring->skbs_dma[i])
			continue;

		owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]);
		ring->skbs_dma[i] = 0;

		dev_kfree_skb(ring->skbs[i]);
		ring->skbs[i] = NULL;
	}
}

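/*
 * All ring memory is device-managed (dmam_/devm_), so there is no explicit
 * free path: descriptors and bookkeeping arrays are released automatically
 * when the device is unbound.
 */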
static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring,
			       unsigned int size)
{
	ring->descs = dmam_alloc_coherent(dev,
					  sizeof(struct owl_emac_ring_desc) * size,
					  &ring->descs_dma, GFP_KERNEL);
	if (!ring->descs)
		return -ENOMEM;

	ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!ring->skbs)
		return -ENOMEM;

	ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t),
				      GFP_KERNEL);
	if (!ring->skbs_dma)
		return -ENOMEM;

	ring->size = size;

	return 0;
}

static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv)
{
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2,
			   OWL_EMAC_VAL_MAC_CSR2_RPD);
}

static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv)
{
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1,
			   OWL_EMAC_VAL_MAC_CSR1_TPD);
}

static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status)
{
	return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
				   OWL_EMAC_BIT_MAC_CSR6_ST, status);
}

static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set_tx(priv, ~0);
}

static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status)
{
	return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
				   OWL_EMAC_MSK_MAC_CSR6_STSR, status);
}

static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set(priv, ~0);
}

static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set(priv, 0);
}

static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	u8 *mac_addr = netdev->dev_addr;
	u32 addr_high, addr_low;

	addr_high = mac_addr[0] << 8 | mac_addr[1];
	addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 |
		   mac_addr[4] << 8 | mac_addr[5];

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high);
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low);
}

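/*
 * The speed/duplex bits in CSR6 seem unsafe to change while DMA is active,
 * hence the stop -> update -> restore sequence below, done under the lock.
 */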
static void owl_emac_update_link_state(struct owl_emac_priv *priv)
{
	u32 val, status;

	if (priv->pause) {
		val = OWL_EMAC_BIT_MAC_CSR20_FCE | OWL_EMAC_BIT_MAC_CSR20_TUE;
		val |= OWL_EMAC_BIT_MAC_CSR20_TPE | OWL_EMAC_BIT_MAC_CSR20_RPE;
		val |= OWL_EMAC_BIT_MAC_CSR20_BPE;
	} else {
		val = 0;
	}

	/* Update flow control. */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val);

	val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M :
					   OWL_EMAC_VAL_MAC_CSR6_SPEED_10M;
	val <<= OWL_EMAC_OFF_MAC_CSR6_SPEED;

	if (priv->duplex == DUPLEX_FULL)
		val |= OWL_EMAC_BIT_MAC_CSR6_FD;

	spin_lock_bh(&priv->lock);

	/* Temporarily stop DMA TX & RX. */
	status = owl_emac_dma_cmd_stop(priv);

	/* Update operation modes. */
	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
			    OWL_EMAC_MSK_MAC_CSR6_SPEED |
			    OWL_EMAC_BIT_MAC_CSR6_FD, val);

	/* Restore DMA TX & RX status. */
	owl_emac_dma_cmd_set(priv, status);

	spin_unlock_bh(&priv->lock);
}

static void owl_emac_adjust_link(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool state_changed = false;

	if (phydev->link) {
		if (!priv->link) {
			priv->link = phydev->link;
			state_changed = true;
		}

		if (priv->speed != phydev->speed) {
			priv->speed = phydev->speed;
			state_changed = true;
		}

		if (priv->duplex != phydev->duplex) {
			priv->duplex = phydev->duplex;
			state_changed = true;
		}

		if (priv->pause != phydev->pause) {
			priv->pause = phydev->pause;
			state_changed = true;
		}
	} else {
		if (priv->link) {
			priv->link = phydev->link;
			state_changed = true;
		}
	}

	if (state_changed) {
		if (phydev->link)
			owl_emac_update_link_state(priv);

		if (netif_msg_link(priv))
			phy_print_status(phydev);
	}
}

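/*
 * Top half only: mask the MAC interrupts and defer all status handling to
 * the NAPI poll loop; owl_emac_poll() re-enables them once the work fits
 * within the budget.
 */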
static irqreturn_t owl_emac_handle_irq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct owl_emac_priv *priv = netdev_priv(netdev);

	if (netif_running(netdev)) {
		owl_emac_irq_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

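/*
 * Expand a 6-byte MAC address into the 12-byte setup frame slot: each
 * 16-bit half of the address lands in the low halfword of a 32-bit word,
 * which appears to be the perfect-filtering layout the MAC expects.
 */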
static void owl_emac_ether_addr_push(u8 **dst, const u8 *src)
{
	u32 *a = (u32 *)(*dst);
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];

	*dst += 12;
}

static void
owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb)
{
	const u8 bcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	const u8 *mac_addr = priv->netdev->dev_addr;
	u8 *frame;
	int i;

	skb_put(skb, OWL_EMAC_SETUP_FRAME_LEN);

	frame = skb->data;
	memset(frame, 0, skb->len);

	owl_emac_ether_addr_push(&frame, mac_addr);
	owl_emac_ether_addr_push(&frame, bcast_addr);

	/* Fill multicast addresses. */
	WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS);
	for (i = 0; i < priv->mcaddr_list.count; i++) {
		mac_addr = priv->mcaddr_list.addrs[i];
		owl_emac_ether_addr_push(&frame, mac_addr);
	}
}

/* The setup frame is a special descriptor which is used to provide physical
 * addresses (i.e. unicast, broadcast and multicast) to the MAC hardware for
 * filtering purposes. To be recognized as a setup frame, the TDES1_SET bit
 * must be set in the TX descriptor control field.
 */
static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	unsigned int tx_head;
	u32 status, control;
	dma_addr_t dma_addr;
	int ret;

	skb = owl_emac_alloc_skb(netdev);
	if (!skb)
		return -ENOMEM;

	owl_emac_setup_frame_prepare(priv, skb);

	dma_addr = owl_emac_dma_map_tx(priv, skb);
	if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) {
		ret = -ENOMEM;
		goto err_free_skb;
	}

	spin_lock_bh(&priv->lock);

	tx_head = ring->head;
	desc = &ring->descs[tx_head];

	status = READ_ONCE(desc->status);
	control = READ_ONCE(desc->control);
	dma_rmb(); /* Ensure data has been read before used. */

	if (unlikely(status & OWL_EMAC_BIT_TDES0_OWN) ||
	    !owl_emac_ring_num_unused(ring)) {
		spin_unlock_bh(&priv->lock);
		owl_emac_dma_unmap_tx(priv, skb, dma_addr);
		ret = -EBUSY;
		goto err_free_skb;
	}

	ring->skbs[tx_head] = skb;
	ring->skbs_dma[tx_head] = dma_addr;

	control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
	control |= OWL_EMAC_BIT_TDES1_SET;
	control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;

	WRITE_ONCE(desc->control, control);
	WRITE_ONCE(desc->buf_addr, dma_addr);
	dma_wmb(); /* Flush descriptor before changing ownership. */
	WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);

	owl_emac_ring_push_head(ring);

	/* Temporarily enable DMA TX. */
	status = owl_emac_dma_cmd_start_tx(priv);

	/* Trigger setup frame processing. */
	owl_emac_dma_cmd_resume_tx(priv);

	/* Restore DMA TX status. */
	owl_emac_dma_cmd_set_tx(priv, status);

	/* Stop regular TX until setup frame is processed. */
	netif_stop_queue(netdev);

	spin_unlock_bh(&priv->lock);

	return 0;

err_free_skb:
	dev_kfree_skb(skb);
	return ret;
}

static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct device *dev = owl_emac_get_dev(priv);
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct owl_emac_ring_desc *desc;
	unsigned int tx_head;
	u32 status, control;
	dma_addr_t dma_addr;

	dma_addr = owl_emac_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n");
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_bh(&priv->lock);

	tx_head = ring->head;
	desc = &ring->descs[tx_head];

	status = READ_ONCE(desc->status);
	control = READ_ONCE(desc->control);
	dma_rmb(); /* Ensure data has been read before used. */

	if (!owl_emac_ring_num_unused(ring) ||
	    unlikely(status & OWL_EMAC_BIT_TDES0_OWN)) {
		netif_stop_queue(netdev);
		spin_unlock_bh(&priv->lock);

		dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status=0x%08x\n",
				    owl_emac_irq_status(priv));
		owl_emac_dma_unmap_tx(priv, skb, dma_addr);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_BUSY;
	}

	ring->skbs[tx_head] = skb;
	ring->skbs_dma[tx_head] = dma_addr;

	control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
	control |= OWL_EMAC_BIT_TDES1_FS | OWL_EMAC_BIT_TDES1_LS;
	control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;

	WRITE_ONCE(desc->control, control);
	WRITE_ONCE(desc->buf_addr, dma_addr);
	dma_wmb(); /* Flush descriptor before changing ownership. */
	WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);

	owl_emac_dma_cmd_resume_tx(priv);
	owl_emac_ring_push_head(ring);

	/* FIXME: The transmission is currently restricted to a single frame
	 * at a time as a workaround for a MAC hardware bug that causes random
	 * freezes of the TX queue processor.
	 */
	netif_stop_queue(netdev);

	spin_unlock_bh(&priv->lock);

	return NETDEV_TX_OK;
}

static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	unsigned int tx_tail;
	u32 status;

	tx_tail = ring->tail;
	desc = &ring->descs[tx_tail];

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Ensure data has been read before used. */

	if (status & OWL_EMAC_BIT_TDES0_OWN)
		return false;

	/* Check for errors. */
	if (status & OWL_EMAC_BIT_TDES0_ES) {
		dev_dbg_ratelimited(&netdev->dev,
				    "TX complete error status: 0x%08x\n",
				    status);

		netdev->stats.tx_errors++;

		if (status & OWL_EMAC_BIT_TDES0_UF)
			netdev->stats.tx_fifo_errors++;

		if (status & OWL_EMAC_BIT_TDES0_EC)
			netdev->stats.tx_aborted_errors++;

		if (status & OWL_EMAC_BIT_TDES0_LC)
			netdev->stats.tx_window_errors++;

		if (status & OWL_EMAC_BIT_TDES0_NC)
			netdev->stats.tx_heartbeat_errors++;

		if (status & OWL_EMAC_BIT_TDES0_LO)
			netdev->stats.tx_carrier_errors++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += ring->skbs[tx_tail]->len;
	}

	/* Some collisions occurred, but the packet has been transmitted. */
	if (status & OWL_EMAC_BIT_TDES0_DE)
		netdev->stats.collisions++;

	skb = ring->skbs[tx_tail];
	owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]);
	dev_kfree_skb(skb);

	ring->skbs[tx_tail] = NULL;
	ring->skbs_dma[tx_tail] = 0;

	owl_emac_ring_pop_tail(ring);

	if (unlikely(netif_queue_stopped(netdev)))
		netif_wake_queue(netdev);

	return true;
}

static void owl_emac_tx_complete(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	unsigned int tx_next;
	u32 status;

	spin_lock(&priv->lock);

	while (ring->tail != ring->head) {
		if (!owl_emac_tx_complete_tail(priv))
			break;
	}

	/* FIXME: This is a workaround for a MAC hardware bug not clearing
	 * (sometimes) the OWN bit for a transmitted frame descriptor.
	 *
	 * At this point, when the TX queue is full, the tail descriptor has
	 * the OWN bit set, which normally means the frame has not been
	 * processed or transmitted yet. But if there is at least one
	 * descriptor in the queue having the OWN bit cleared, we can safely
	 * assume the tail frame has also been processed by the MAC hardware.
	 *
	 * If that's the case, let's force the frame completion by manually
	 * clearing the OWN bit.
	 */
	if (unlikely(!owl_emac_ring_num_unused(ring))) {
		tx_next = ring->tail;

		while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) {
			status = READ_ONCE(ring->descs[tx_next].status);
			dma_rmb(); /* Ensure data has been read before used. */

			if (status & OWL_EMAC_BIT_TDES0_OWN)
				continue;

			netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n");

			status = READ_ONCE(ring->descs[ring->tail].status);
			dma_rmb(); /* Ensure data has been read before used. */
			status &= ~OWL_EMAC_BIT_TDES0_OWN;
			WRITE_ONCE(ring->descs[ring->tail].status, status);

			owl_emac_tx_complete_tail(priv);
			break;
		}
	}

	spin_unlock(&priv->lock);
}

static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	struct device *dev = owl_emac_get_dev(priv);
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t curr_dma, new_dma;
	unsigned int rx_tail, len;
	u32 status;
	int recv = 0;

	while (recv < budget) {
		spin_lock(&priv->lock);

		rx_tail = ring->tail;
		desc = &ring->descs[rx_tail];

		status = READ_ONCE(desc->status);
		dma_rmb(); /* Ensure data has been read before used. */

		if (status & OWL_EMAC_BIT_RDES0_OWN) {
			spin_unlock(&priv->lock);
			break;
		}

		curr_skb = ring->skbs[rx_tail];
		curr_dma = ring->skbs_dma[rx_tail];
		owl_emac_ring_pop_tail(ring);

		spin_unlock(&priv->lock);

		if (status & (OWL_EMAC_BIT_RDES0_DE | OWL_EMAC_BIT_RDES0_RF |
			      OWL_EMAC_BIT_RDES0_TL | OWL_EMAC_BIT_RDES0_CS |
			      OWL_EMAC_BIT_RDES0_DB | OWL_EMAC_BIT_RDES0_CE |
			      OWL_EMAC_BIT_RDES0_ZERO)) {
			dev_dbg_ratelimited(&netdev->dev,
					    "RX desc error status: 0x%08x\n",
					    status);

			if (status & OWL_EMAC_BIT_RDES0_DE)
				netdev->stats.rx_over_errors++;

			if (status & (OWL_EMAC_BIT_RDES0_RF | OWL_EMAC_BIT_RDES0_DB))
				netdev->stats.rx_frame_errors++;

			if (status & OWL_EMAC_BIT_RDES0_TL)
				netdev->stats.rx_length_errors++;

			if (status & OWL_EMAC_BIT_RDES0_CS)
				netdev->stats.collisions++;

			if (status & OWL_EMAC_BIT_RDES0_CE)
				netdev->stats.rx_crc_errors++;

			if (status & OWL_EMAC_BIT_RDES0_ZERO)
				netdev->stats.rx_fifo_errors++;

			goto drop_skb;
		}

		len = (status & OWL_EMAC_MSK_RDES0_FL) >> OWL_EMAC_OFF_RDES0_FL;
		if (unlikely(len > OWL_EMAC_RX_FRAME_MAX_LEN)) {
			netdev->stats.rx_length_errors++;
			netdev_err(netdev, "invalid RX frame len: %u\n", len);
			goto drop_skb;
		}

		/* Prepare new skb before receiving the current one. */
		new_skb = owl_emac_alloc_skb(netdev);
		if (unlikely(!new_skb))
			goto drop_skb;

		new_dma = owl_emac_dma_map_rx(priv, new_skb);
		if (dma_mapping_error(dev, new_dma)) {
			dev_kfree_skb(new_skb);
			netdev_err(netdev, "RX DMA mapping failed\n");
			goto drop_skb;
		}

		owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma);

		skb_put(curr_skb, len - ETH_FCS_LEN);
		curr_skb->ip_summed = CHECKSUM_NONE;
		curr_skb->protocol = eth_type_trans(curr_skb, netdev);
		curr_skb->dev = netdev;

		netif_receive_skb(curr_skb);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += len;
		recv++;
		goto push_skb;

drop_skb:
		netdev->stats.rx_dropped++;
		netdev->stats.rx_errors++;
		/* Reuse the current skb. */
		new_skb = curr_skb;
		new_dma = curr_dma;

push_skb:
		spin_lock(&priv->lock);

		ring->skbs[ring->head] = new_skb;
		ring->skbs_dma[ring->head] = new_dma;

		WRITE_ONCE(desc->buf_addr, new_dma);
		dma_wmb(); /* Flush descriptor before changing ownership. */
		WRITE_ONCE(desc->status, OWL_EMAC_BIT_RDES0_OWN);

		owl_emac_ring_push_head(ring);

		spin_unlock(&priv->lock);
	}

	return recv;
}

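/*
 * Note: tx_err_cnt/rx_err_cnt below are function-static, so they are shared
 * by all instances of this driver; that is only safe while a system carries
 * a single EMAC, which appears to be the assumption here.
 */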
static int owl_emac_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0, ru_cnt = 0, recv;
	static int tx_err_cnt, rx_err_cnt;
	struct owl_emac_priv *priv;
	u32 status, proc_status;

	priv = container_of(napi, struct owl_emac_priv, napi);

	while ((status = owl_emac_irq_clear(priv)) &
	       (OWL_EMAC_BIT_MAC_CSR5_NIS | OWL_EMAC_BIT_MAC_CSR5_AIS)) {
		recv = 0;

		/* TX setup frame raises ETI instead of TI. */
		if (status & (OWL_EMAC_BIT_MAC_CSR5_TI | OWL_EMAC_BIT_MAC_CSR5_ETI)) {
			owl_emac_tx_complete(priv);
			tx_err_cnt = 0;

			/* Count MAC internal RX errors. */
			proc_status = status & OWL_EMAC_MSK_MAC_CSR5_RS;
			proc_status >>= OWL_EMAC_OFF_MAC_CSR5_RS;
			if (proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_DATA ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_CDES ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_FDES)
				rx_err_cnt++;
		}

		if (status & OWL_EMAC_BIT_MAC_CSR5_RI) {
			recv = owl_emac_rx_process(priv, budget - work_done);
			rx_err_cnt = 0;

			/* Count MAC internal TX errors. */
			proc_status = status & OWL_EMAC_MSK_MAC_CSR5_TS;
			proc_status >>= OWL_EMAC_OFF_MAC_CSR5_TS;
			if (proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_DATA ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_CDES)
				tx_err_cnt++;
		} else if (status & OWL_EMAC_BIT_MAC_CSR5_RU) {
			/* MAC AHB is in suspended state, will return to RX
			 * descriptor processing when the host changes ownership
			 * of the descriptor and either an RX poll demand CMD is
			 * issued or a new frame is recognized by the MAC AHB.
			 */
			if (++ru_cnt == 2)
				owl_emac_dma_cmd_resume_rx(priv);

			recv = owl_emac_rx_process(priv, budget - work_done);

			/* Guard against too many RU interrupts. */
			if (ru_cnt > 3)
				break;
		}

		work_done += recv;
		if (work_done >= budget)
			break;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		owl_emac_irq_enable(priv);
	}

	/* Reset MAC when getting too many internal TX or RX errors. */
	if (tx_err_cnt > 10 || rx_err_cnt > 10) {
		netdev_dbg(priv->netdev, "%s error status: 0x%08x\n",
			   tx_err_cnt > 10 ? "TX" : "RX", status);
		rx_err_cnt = 0;
		tx_err_cnt = 0;
		schedule_work(&priv->mac_reset_task);
	}

	return work_done;
}

static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv)
{
	u32 val;

	/* Enable MDC clock generation by adjusting CLKDIV according to
	 * the vendor implementation of the original driver.
	 */
	val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
	val &= OWL_EMAC_MSK_MAC_CSR10_CLKDIV;
	val |= OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 << OWL_EMAC_OFF_MAC_CSR10_CLKDIV;

	val |= OWL_EMAC_BIT_MAC_CSR10_SB;
	val |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val);
}

static void owl_emac_core_hw_reset(struct owl_emac_priv *priv)
{
	/* Trigger hardware reset. */
	reset_control_assert(priv->reset);
	usleep_range(10, 20);
	reset_control_deassert(priv->reset);
	usleep_range(100, 200);
}

static int owl_emac_core_sw_reset(struct owl_emac_priv *priv)
{
	u32 val;
	int ret;

	/* Trigger software reset. */
	owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR);
	ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0,
				 val, !(val & OWL_EMAC_BIT_MAC_CSR0_SWR),
				 OWL_EMAC_POLL_DELAY_USEC,
				 OWL_EMAC_RESET_POLL_TIMEOUT_USEC);
	if (ret)
		return ret;

	if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) {
		/* Enable RMII and use the 50MHz rmii clk as output to PHY. */
		val = 0;
	} else {
		/* Enable SMII and use the 125MHz rmii clk as output to PHY.
		 * Additionally set SMII SYNC delay to 4 half cycles.
		 */
		val = 0x04 << OWL_EMAC_OFF_MAC_CTRL_SSDC;
		val |= OWL_EMAC_BIT_MAC_CTRL_RSIS;
	}
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val);

	/* MDC is disabled after reset. */
	owl_emac_mdio_clock_enable(priv);

	/* Set FIFO pause & restart threshold levels. */
	val = 0x40 << OWL_EMAC_OFF_MAC_CSR19_FPTL;
	val |= 0x10 << OWL_EMAC_OFF_MAC_CSR19_FRTL;
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val);

	/* Set flow control pause quanta time to ~100 ms. */
	val = 0x4FFF << OWL_EMAC_OFF_MAC_CSR18_PQT;
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val);

	/* Setup interrupt mitigation. */
	val = 7 << OWL_EMAC_OFF_MAC_CSR11_NRP;
	val |= 4 << OWL_EMAC_OFF_MAC_CSR11_RT;
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val);

	/* Set RX/TX rings base addresses. */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3,
			   (u32)(priv->rx_ring.descs_dma));
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4,
			   (u32)(priv->tx_ring.descs_dma));

	/* Setup initial operation mode. */
	val = OWL_EMAC_VAL_MAC_CSR6_SPEED_100M << OWL_EMAC_OFF_MAC_CSR6_SPEED;
	val |= OWL_EMAC_BIT_MAC_CSR6_FD;
	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
			    OWL_EMAC_MSK_MAC_CSR6_SPEED |
			    OWL_EMAC_BIT_MAC_CSR6_FD, val);
	owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6,
			   OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM);

	priv->link = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->duplex = DUPLEX_UNKNOWN;
	priv->pause = 0;
	priv->mcaddr_list.count = 0;

	return 0;
}

static int owl_emac_enable(struct net_device *netdev, bool start_phy)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	int ret;

	owl_emac_dma_cmd_stop(priv);
	owl_emac_irq_disable(priv);
	owl_emac_irq_clear(priv);

	owl_emac_ring_prepare_tx(priv);
	ret = owl_emac_ring_prepare_rx(priv);
	if (ret)
		goto err_unprep;

	ret = owl_emac_core_sw_reset(priv);
	if (ret) {
		netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret);
		goto err_unprep;
	}

	owl_emac_set_hw_mac_addr(netdev);
	owl_emac_setup_frame_xmit(priv);

	netdev_reset_queue(netdev);
	napi_enable(&priv->napi);

	owl_emac_irq_enable(priv);
	owl_emac_dma_cmd_start(priv);

	if (start_phy)
		phy_start(netdev->phydev);

	netif_start_queue(netdev);

	return 0;

err_unprep:
	owl_emac_ring_unprepare_rx(priv);
	owl_emac_ring_unprepare_tx(priv);

	return ret;
}

static void owl_emac_disable(struct net_device *netdev, bool stop_phy)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);

	owl_emac_dma_cmd_stop(priv);
	owl_emac_irq_disable(priv);

	netif_stop_queue(netdev);
	napi_disable(&priv->napi);

	if (stop_phy)
		phy_stop(netdev->phydev);

	owl_emac_ring_unprepare_rx(priv);
	owl_emac_ring_unprepare_tx(priv);
}

static int owl_emac_ndo_open(struct net_device *netdev)
{
	return owl_emac_enable(netdev, true);
}

static int owl_emac_ndo_stop(struct net_device *netdev)
{
	owl_emac_disable(netdev, true);

	return 0;
}

static void owl_emac_set_multicast(struct net_device *netdev, int count)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	int index = 0;

	if (count <= 0) {
		priv->mcaddr_list.count = 0;
		return;
	}

	netdev_for_each_mc_addr(ha, netdev) {
		if (!is_multicast_ether_addr(ha->addr))
			continue;

		WARN_ON(index >= OWL_EMAC_MAX_MULTICAST_ADDRS);
		ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr);
	}

	priv->mcaddr_list.count = index;

	owl_emac_setup_frame_xmit(priv);
}

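/*
 * RX filtering strategy: promiscuous mode sets the PR bit, IFF_ALLMULTI (or
 * more multicast addresses than the setup frame can hold) falls back to the
 * pass-all-multicast PM bit, and anything else relies on perfect filtering
 * through the setup frame built in owl_emac_set_multicast().
 */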
static void owl_emac_ndo_set_rx_mode(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	u32 status, val = 0;
	int mcast_count = 0;

	if (netdev->flags & IFF_PROMISC) {
		val = OWL_EMAC_BIT_MAC_CSR6_PR;
	} else if (netdev->flags & IFF_ALLMULTI) {
		val = OWL_EMAC_BIT_MAC_CSR6_PM;
	} else if (netdev->flags & IFF_MULTICAST) {
		mcast_count = netdev_mc_count(netdev);

		if (mcast_count > OWL_EMAC_MAX_MULTICAST_ADDRS) {
			val = OWL_EMAC_BIT_MAC_CSR6_PM;
			mcast_count = 0;
		}
	}

	spin_lock_bh(&priv->lock);

	/* Temporarily stop DMA TX & RX. */
	status = owl_emac_dma_cmd_stop(priv);

	/* Update operation modes. */
	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
			    OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM,
			    val);

	/* Restore DMA TX & RX status. */
	owl_emac_dma_cmd_set(priv, status);

	spin_unlock_bh(&priv->lock);

	/* Set/reset multicast addr list. */
	owl_emac_set_multicast(netdev, mcast_count);
}

static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
{
	struct sockaddr *skaddr = addr;

	if (!is_valid_ether_addr(skaddr->sa_data))
		return -EADDRNOTAVAIL;

	if (netif_running(netdev))
		return -EBUSY;

	memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
	owl_emac_set_hw_mac_addr(netdev);

	return owl_emac_setup_frame_xmit(netdev_priv(netdev));
}

static int owl_emac_ndo_eth_ioctl(struct net_device *netdev,
				  struct ifreq *req, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, req, cmd);
}

static void owl_emac_ndo_tx_timeout(struct net_device *netdev,
				    unsigned int txqueue)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);

	schedule_work(&priv->mac_reset_task);
}

static void owl_emac_reset_task(struct work_struct *work)
{
	struct owl_emac_priv *priv;

	priv = container_of(work, struct owl_emac_priv, mac_reset_task);

	netdev_dbg(priv->netdev, "resetting MAC\n");
	owl_emac_disable(priv->netdev, false);
	owl_emac_enable(priv->netdev, false);
}

static struct net_device_stats *
owl_emac_ndo_get_stats(struct net_device *netdev)
{
	/* FIXME: If possible, try to get stats from MAC hardware registers
	 * instead of tracking them manually in the driver.
	 */

	return &netdev->stats;
}

static const struct net_device_ops owl_emac_netdev_ops = {
	.ndo_open		= owl_emac_ndo_open,
	.ndo_stop		= owl_emac_ndo_stop,
	.ndo_start_xmit		= owl_emac_ndo_start_xmit,
	.ndo_set_rx_mode	= owl_emac_ndo_set_rx_mode,
	.ndo_set_mac_address	= owl_emac_ndo_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= owl_emac_ndo_eth_ioctl,
	.ndo_tx_timeout		= owl_emac_ndo_tx_timeout,
	.ndo_get_stats		= owl_emac_ndo_get_stats,
};

static void owl_emac_ethtool_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *info)
{
	strscpy(info->driver, OWL_EMAC_DRVNAME, sizeof(info->driver));
}

static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val)
{
	struct owl_emac_priv *priv = netdev_priv(ndev);

	priv->msg_enable = val;
}

static const struct ethtool_ops owl_emac_ethtool_ops = {
	.get_drvinfo		= owl_emac_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_msglevel		= owl_emac_ethtool_get_msglevel,
	.set_msglevel		= owl_emac_ethtool_set_msglevel,
};

static int owl_emac_mdio_wait(struct owl_emac_priv *priv)
{
	u32 val;

	/* Wait while data transfer is in progress. */
	return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10,
				  val, !(val & OWL_EMAC_BIT_MAC_CSR10_SB),
				  OWL_EMAC_POLL_DELAY_USEC,
				  OWL_EMAC_MDIO_POLL_TIMEOUT_USEC);
}

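/*
 * MDIO transactions go through the single CSR10 register: the opcode, PHY
 * address, register address (and, for writes, the data) are packed into one
 * word together with the SB bit, which the hardware clears once the transfer
 * completes; owl_emac_mdio_wait() polls for exactly that.
 */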
static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct owl_emac_priv *priv = bus->priv;
	u32 data, tmp;
	int ret;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	data = OWL_EMAC_BIT_MAC_CSR10_SB;
	data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;

	tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;

	tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);

	ret = owl_emac_mdio_wait(priv);
	if (ret)
		return ret;

	data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
	data &= OWL_EMAC_MSK_MAC_CSR10_DATA;

	return data;
}

static int
owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct owl_emac_priv *priv = bus->priv;
	u32 data, tmp;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	data = OWL_EMAC_BIT_MAC_CSR10_SB;
	data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;

	tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;

	tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;

	data |= val & OWL_EMAC_MSK_MAC_CSR10_DATA;

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);

	return owl_emac_mdio_wait(priv);
}

static int owl_emac_mdio_init(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct device *dev = owl_emac_get_dev(priv);
	struct device_node *mdio_node;
	int ret;

	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
	if (!mdio_node)
		return -ENODEV;

	if (!of_device_is_available(mdio_node)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	priv->mii = devm_mdiobus_alloc(dev);
	if (!priv->mii) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
	priv->mii->name = "owl-emac-mdio";
	priv->mii->parent = dev;
	priv->mii->read = owl_emac_mdio_read;
	priv->mii->write = owl_emac_mdio_write;
	priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. */
	priv->mii->priv = priv;

	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);

err_put_node:
	of_node_put(mdio_node);
	return ret;
}

static int owl_emac_phy_init(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct device *dev = owl_emac_get_dev(priv);
	struct phy_device *phy;

	phy = of_phy_get_and_connect(netdev, dev->of_node,
				     owl_emac_adjust_link);
	if (!phy)
		return -ENODEV;

	phy_set_sym_pause(phy, true, true, true);

	if (netif_msg_link(priv))
		phy_attached_info(phy);

	return 0;
}

static void owl_emac_get_mac_addr(struct net_device *netdev)
{
	struct device *dev = netdev->dev.parent;
	int ret;

	ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
	if (!ret && is_valid_ether_addr(netdev->dev_addr))
		return;

	eth_hw_addr_random(netdev);
	dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr);
}

static __maybe_unused int owl_emac_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct owl_emac_priv *priv = netdev_priv(netdev);

	disable_irq(netdev->irq);

	if (netif_running(netdev)) {
		owl_emac_disable(netdev, true);
		netif_device_detach(netdev);
	}

	clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);

	return 0;
}

static __maybe_unused int owl_emac_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct owl_emac_priv *priv = netdev_priv(netdev);
	int ret;

	ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
	if (ret)
		return ret;

	if (netif_running(netdev)) {
		owl_emac_core_hw_reset(priv);
		owl_emac_core_sw_reset(priv);

		ret = owl_emac_enable(netdev, true);
		if (ret) {
			clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
			return ret;
		}

		netif_device_attach(netdev);
	}

	enable_irq(netdev->irq);

	return 0;
}

static void owl_emac_clk_disable_unprepare(void *data)
{
	struct owl_emac_priv *priv = data;

	clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
}

static int owl_emac_clk_set_rate(struct owl_emac_priv *priv)
{
	struct device *dev = owl_emac_get_dev(priv);
	unsigned long rate;
	int ret;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		rate = 50000000;
		break;

	case PHY_INTERFACE_MODE_SMII:
		rate = 125000000;
		break;

	default:
		dev_err(dev, "unsupported phy interface mode %d\n",
			priv->phy_mode);
		return -EOPNOTSUPP;
	}

	ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate);
	if (ret)
		dev_err(dev, "failed to set RMII clock rate: %d\n", ret);

	return ret;
}

static int owl_emac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct owl_emac_priv *priv;
	int ret, i;

	netdev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!netdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, dev);

	priv = netdev_priv(netdev);
	priv->netdev = netdev;
	priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE);

	ret = of_get_phy_mode(dev->of_node, &priv->phy_mode);
	if (ret) {
		dev_err(dev, "failed to get phy mode: %d\n", ret);
		return ret;
	}

	spin_lock_init(&priv->lock);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE);
	if (ret)
		return ret;

	ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE);
	if (ret)
		return ret;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	netdev->irq = platform_get_irq(pdev, 0);
	if (netdev->irq < 0)
		return netdev->irq;

	ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq,
			       IRQF_SHARED, netdev->name, netdev);
	if (ret) {
		dev_err(dev, "failed to request irq: %d\n", netdev->irq);
		return ret;
	}

	for (i = 0; i < OWL_EMAC_NCLKS; i++)
		priv->clks[i].id = owl_emac_clk_names[i];

	ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	ret = owl_emac_clk_set_rate(priv);
	if (ret)
		return ret;

	priv->reset = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(priv->reset))
		return dev_err_probe(dev, PTR_ERR(priv->reset),
				     "failed to get reset control");

	owl_emac_get_mac_addr(netdev);

	owl_emac_core_hw_reset(priv);
	owl_emac_mdio_clock_enable(priv);

	ret = owl_emac_mdio_init(netdev);
	if (ret) {
		dev_err(dev, "failed to initialize MDIO bus\n");
		return ret;
	}

	ret = owl_emac_phy_init(netdev);
	if (ret) {
		dev_err(dev, "failed to initialize PHY\n");
		return ret;
	}

	INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task);

	netdev->min_mtu = OWL_EMAC_MTU_MIN;
	netdev->max_mtu = OWL_EMAC_MTU_MAX;
	netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
	netdev->netdev_ops = &owl_emac_netdev_ops;
	netdev->ethtool_ops = &owl_emac_ethtool_ops;
	netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);

	ret = devm_register_netdev(dev, netdev);
	if (ret) {
		netif_napi_del(&priv->napi);
		phy_disconnect(netdev->phydev);
		return ret;
	}

	return 0;
}

static int owl_emac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct owl_emac_priv *priv = netdev_priv(netdev);

	netif_napi_del(&priv->napi);
	phy_disconnect(netdev->phydev);
	cancel_work_sync(&priv->mac_reset_task);

	return 0;
}

static const struct of_device_id owl_emac_of_match[] = {
	{ .compatible = "actions,owl-emac", },
	{ }
};
MODULE_DEVICE_TABLE(of, owl_emac_of_match);

static SIMPLE_DEV_PM_OPS(owl_emac_pm_ops,
			 owl_emac_suspend, owl_emac_resume);

static struct platform_driver owl_emac_driver = {
	.driver = {
		.name = OWL_EMAC_DRVNAME,
		.of_match_table = owl_emac_of_match,
		.pm = &owl_emac_pm_ops,
	},
	.probe = owl_emac_probe,
	.remove = owl_emac_remove,
};
module_platform_driver(owl_emac_driver);

MODULE_DESCRIPTION("Actions Semi Owl SoCs Ethernet MAC Driver");
MODULE_AUTHOR("Actions Semi Inc.");
MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
MODULE_LICENSE("GPL");