/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
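
/*
 * For example (illustrative only, not driver code): with an unsigned
 * index and a power-of-two ring size, the compiler can turn
 *
 *      next = (next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 *
 * into a simple mask, (next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1).
 */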

/* Allow 8 extra bytes (4-byte VLAN tag + 4-byte FCS) beyond the Ethernet header. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
        u64 d64;
        struct {
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
                /* Physical address of the buffer */
                u64 addr:40;
        } s;
};
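
/*
 * Illustrative only: a descriptor is built by filling the bit-fields
 * and storing the raw 64-bit image into the ring, mirroring what the
 * driver does in octeon_mgmt_rx_fill_ring() and octeon_mgmt_xmit():
 *
 *      union mgmt_port_ring_entry re;
 *      re.d64 = 0;
 *      re.s.len = size;        (buffer length in bytes)
 *      re.s.addr = handle;     (40-bit DMA/physical address)
 *      ring[i] = re.d64;
 */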

struct octeon_mgmt {
        struct net_device *netdev;
        int port;
        int irq;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll. No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct phy_device *phydev;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}

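/*
 * Note (assumption): ring_max_fill() leaves an 8-entry margin below
 * the ring size, presumably so the software fill index can never
 * catch up with the hardware's consumer position; the margin is not
 * otherwise documented here.
 */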
static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding. */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring. */
                p->rx_ring[p->rx_next_fill] = re.d64;
                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell. */
                cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
        }
}

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        int port = p->port;
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        while (mix_orcnt.s.orcnt) {
                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                spin_lock_irqsave(&p->tx_list.lock, flags);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer. */
                cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
        struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers. */
        drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
        bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;

        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers. */
        s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
        s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

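        /*
         * Note (assumption): the buffer was mapped with an MTU-derived
         * size in octeon_mgmt_rx_fill_ring() but is unmapped here with
         * a fixed ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM. This is
         * harmless only because dma_unmap_single() is a no-op on
         * Octeon (see octeon_mgmt_stop()).
         */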
        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
        int port = p->port;
        struct net_device *netdev = p->netdev;
        union cvmx_mixx_ircnt mix_ircnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *skb_new;
        union mgmt_port_ring_entry re2;
        int rc = 1;

        re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
        if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
                /* A good packet, send it up. */
                skb_put(skb, re.s.len);
good:
                skb->protocol = eth_type_trans(skb, netdev);
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += skb->len;
                netdev->last_rx = jiffies;
                netif_receive_skb(skb);
                rc = 0;
        } else if (re.s.code == RING_ENTRY_CODE_MORE) {
                /*
                 * Packet split across skbs. This can happen if we
                 * increase the MTU. Buffers that are already in the
                 * rx ring can then end up being too small. As the rx
                 * ring is refilled, buffers sized for the new MTU
                 * will be used and we should go back to the normal
                 * non-split case.
                 */
                skb_put(skb, re.s.len);
                do {
                        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                        if (re2.s.code != RING_ENTRY_CODE_MORE
                            && re2.s.code != RING_ENTRY_CODE_DONE)
                                goto split_error;
                        skb_put(skb2, re2.s.len);
                        skb_new = skb_copy_expand(skb, 0, skb2->len,
                                                  GFP_ATOMIC);
                        if (!skb_new)
                                goto split_error;
                        if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                                          skb2->len))
                                goto split_error;
                        skb_put(skb_new, skb2->len);
                        dev_kfree_skb_any(skb);
                        dev_kfree_skb_any(skb2);
                        skb = skb_new;
                } while (re2.s.code == RING_ENTRY_CODE_MORE);
                goto good;
        } else {
                /* Some other error, discard it. */
                dev_kfree_skb_any(skb);
                /*
                 * Error statistics are accumulated in
                 * octeon_mgmt_update_rx_stats.
                 */
        }
        goto done;
split_error:
        /* Discard the whole mess. */
        dev_kfree_skb_any(skb);
        dev_kfree_skb_any(skb2);
        while (re2.s.code == RING_ENTRY_CODE_MORE) {
                re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                dev_kfree_skb_any(skb2);
        }
        netdev->stats.rx_errors++;

done:
        /* Tell the hardware we processed a packet. */
        mix_ircnt.u64 = 0;
        mix_ircnt.s.ircnt = 1;
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
        return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
        int port = p->port;
        unsigned int work_done = 0;
        union cvmx_mixx_ircnt mix_ircnt;
        int rc;

        mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        while (work_done < budget && mix_ircnt.s.ircnt) {
                rc = octeon_mgmt_receive_one(p);
                if (!rc)
                        work_done++;

                /* Check for more packets. */
                mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        }

        octeon_mgmt_rx_fill_ring(p->netdev);

        return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done = 0;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* We stopped because no more packets were available. */
                napi_complete(napi);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_mixx_bist mix_bist;
        union cvmx_agl_gmx_bist agl_gmx_bist;

        mix_ctl.u64 = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        do {
                mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        } while (mix_ctl.s.busy);
        mix_ctl.s.reset = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        cvmx_wait(64);

        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
        if (mix_bist.u64)
                dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
                         (unsigned long long)mix_bist.u64);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
                dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
                         (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
        u64 cam[6];
        u64 cam_mask;
        int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
                                      unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
        cs->cam_mask |= (1ULL << cs->cam_index);
        cs->cam_index++;
}
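
/*
 * Illustrative only: the CAM state stores up to eight 6-byte addresses
 * column-wise, cam[i] holding byte i of every entry, one byte per
 * entry slot. After adding 01:02:03:04:05:06 as entry 0:
 *
 *      cs->cam[0] == 0x01, cs->cam[1] == 0x02, ..., cs->cam[5] == 0x06
 *      cs->cam_mask == 0x1
 *
 * A second address would occupy bits 15:8 of each cam[i].
 */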

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
        union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
        unsigned long flags;
        unsigned int prev_packet_enable;
        unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
        unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
        struct octeon_mgmt_cam_state cam_state;
        struct dev_addr_list *list;
        struct list_head *pos;
        int available_cam_entries;

        memset(&cam_state, 0, sizeof(cam_state));

        if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
                cam_mode = 0;
                available_cam_entries = 8;
        } else {
                /*
                 * One CAM entry for the primary address, leaves seven
                 * for the secondary addresses.
                 */
                available_cam_entries = 7 - netdev->dev_addrs.count;
        }

        if (netdev->flags & IFF_MULTICAST) {
                if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
                    netdev_mc_count(netdev) > available_cam_entries)
                        multicast_mode = 2; /* 2 - Accept all multicast. */
                else
                        multicast_mode = 0; /* 0 - Use CAM. */
        }

        if (cam_mode == 1) {
                /* Add primary address. */
                octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
                list_for_each(pos, &netdev->dev_addrs.list) {
                        struct netdev_hw_addr *hw_addr;
                        hw_addr = list_entry(pos, struct netdev_hw_addr, list);
                        octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
                }
        }
        if (multicast_mode == 0) {
                netdev_for_each_mc_addr(list, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
        }

        spin_lock_irqsave(&p->lock, flags);

        /* Disable packet I/O. */
        agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prev_packet_enable = agl_gmx_prtx.s.en;
        agl_gmx_prtx.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

        adr_ctl.u64 = 0;
        adr_ctl.s.cam_mode = cam_mode;
        adr_ctl.s.mcst = multicast_mode;
        adr_ctl.s.bcst = 1;     /* Allow broadcast */

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

        /* Restore packet I/O. */
        agl_gmx_prtx.s.en = prev_packet_enable;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

        spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
        struct sockaddr *sa = addr;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

        octeon_mgmt_set_rx_filtering(netdev);

        return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

        /*
         * Limit the MTU to make sure the ethernet packets are between
         * 64 bytes and 16383 bytes.
         */
        if (size_without_fcs < 64 || size_without_fcs > 16383) {
                dev_warn(p->dev, "MTU must be between %d and %d.\n",
                         64 - OCTEON_MGMT_RX_HEADROOM,
                         16383 - OCTEON_MGMT_RX_HEADROOM);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;

        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
                       (size_without_fcs + 7) & 0xfff8);

        return 0;
}
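
/*
 * Illustrative only: for the default 1500-byte MTU, size_without_fcs is
 * 1500 + OCTEON_MGMT_RX_HEADROOM = 1522, so the jabber cutoff written
 * above is rounded up to the next multiple of 8:
 * (1522 + 7) & 0xfff8 == 1528.
 */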

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port),
                       cvmx_read_csr(CVMX_MIXX_ISR(port)));
        cvmx_read_csr(CVMX_MIXX_ISR(port));

        if (mixx_isr.s.irthresh) {
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
                             struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!netif_running(netdev))
                return -EINVAL;

        if (!p->phydev)
                return -EINVAL;

        return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        unsigned long flags;
        int link_changed = 0;

        spin_lock_irqsave(&p->lock, flags);
        if (p->phydev->link) {
                if (!p->last_link)
                        link_changed = 1;
                if (p->last_duplex != p->phydev->duplex) {
                        p->last_duplex = p->phydev->duplex;
                        prtx_cfg.u64 =
                                cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
                        prtx_cfg.s.duplex = p->phydev->duplex;
                        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
                                       prtx_cfg.u64);
                }
        } else {
                if (p->last_link)
                        link_changed = -1;
        }
        p->last_link = p->phydev->link;
        spin_unlock_irqrestore(&p->lock, flags);

        if (link_changed != 0) {
                if (link_changed > 0) {
                        netif_carrier_on(netdev);
                        pr_info("%s: Link is up - %d/%s\n", netdev->name,
                                p->phydev->speed,
                                DUPLEX_FULL == p->phydev->duplex ?
                                        "Full" : "Half");
                } else {
                        netif_carrier_off(netdev);
                        pr_info("%s: Link is down\n", netdev->name);
                }
        }
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        char phy_id[20];

        if (octeon_is_simulation()) {
                /* No PHYs in the simulator. */
                netif_carrier_on(netdev);
                return 0;
        }

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

        p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);

        if (IS_ERR(p->phydev)) {
                p->phydev = NULL;
                return -1;
        }

        phy_start_aneg(p->phydev);

        return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
        union cvmx_mixx_oring1 oring1;
        union cvmx_mixx_iring1 iring1;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        union cvmx_mixx_irhwm mix_irhwm;
        union cvmx_mixx_orhwm mix_orhwm;
        union cvmx_mixx_intena mix_intena;
        struct sockaddr sa;

        /* Allocate ring buffers. */
        p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->tx_ring)
                return -ENOMEM;
        p->tx_ring_handle =
                dma_map_single(p->dev, p->tx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                               DMA_BIDIRECTIONAL);
        p->tx_next = 0;
        p->tx_next_clean = 0;
        p->tx_current_fill = 0;

        p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->rx_ring)
                goto err_nomem;
        p->rx_ring_handle =
                dma_map_single(p->dev, p->rx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                               DMA_BIDIRECTIONAL);

        p->rx_next = 0;
        p->rx_next_fill = 0;
        p->rx_current_fill = 0;

        octeon_mgmt_reset_hw(p);

        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

        /* Bring it out of reset if needed. */
        if (mix_ctl.s.reset) {
                mix_ctl.s.reset = 0;
                cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
                do {
                        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
                } while (mix_ctl.s.reset);
        }

        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

        oring1.u64 = 0;
        oring1.s.obase = p->tx_ring_handle >> 3;
        oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

        iring1.u64 = 0;
        iring1.s.ibase = p->rx_ring_handle >> 3;
        iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

        /* Disable packet I/O. */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
        octeon_mgmt_set_mac_address(netdev, &sa);

        octeon_mgmt_change_mtu(netdev, netdev->mtu);

        /*
         * Enable the MIX port HW. Packets are not allowed to flow
         * until the AGL port is enabled further below.
         */
        mix_ctl.u64 = 0;
        mix_ctl.s.crc_strip = 1;        /* Strip the ending CRC */
        mix_ctl.s.en = 1;               /* Enable the port */
        mix_ctl.s.nbtarb = 0;           /* Arbitration mode */
        /* MII CB-request FIFO programmable high watermark */
        mix_ctl.s.mrq_hwm = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
                /*
                 * Force compensation values, as they are not
                 * determined properly by HW.
                 */
                union cvmx_agl_gmx_drv_ctl drv_ctl;

                drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
                if (port) {
                        drv_ctl.s.byp_en1 = 1;
                        drv_ctl.s.nctl1 = 6;
                        drv_ctl.s.pctl1 = 6;
                } else {
                        drv_ctl.s.byp_en = 1;
                        drv_ctl.s.nctl = 6;
                        drv_ctl.s.pctl = 6;
                }
                cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
        }

        octeon_mgmt_rx_fill_ring(netdev);

        /* Clear statistics (the registers are clear-on-read). */
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

        cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

        if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
                        netdev)) {
                dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
                goto err_noirq;
        }

        /* Interrupt on every single RX packet */
        mix_irhwm.u64 = 0;
        mix_irhwm.s.irhwm = 0;
        cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

        /* Interrupt when we have 5 or more packets to clean. */
        mix_orhwm.u64 = 0;
        mix_orhwm.s.orhwm = 5;
        cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

        /* Enable receive and transmit interrupts */
        mix_intena.u64 = 0;
        mix_intena.s.ithena = 1;
        mix_intena.s.othena = 1;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

        /* Enable packet I/O. */

        rxx_frm_ctl.u64 = 0;
        rxx_frm_ctl.s.pre_align = 1;
        /*
         * When set, disables the length check for non-min sized pkts
         * with padding in the client data.
         */
        rxx_frm_ctl.s.pad_len = 1;
        /* When set, disables the length check for VLAN pkts */
        rxx_frm_ctl.s.vlan_len = 1;
        /* When set, PREAMBLE checking is less strict */
        rxx_frm_ctl.s.pre_free = 1;
        /* Control Pause Frames can match station SMAC */
        rxx_frm_ctl.s.ctl_smac = 0;
        /* Control Pause Frames can match globally assigned Multicast address */
        rxx_frm_ctl.s.ctl_mcst = 1;
        /* Forward pause information to TX block */
        rxx_frm_ctl.s.ctl_bck = 1;
        /* Drop Control Pause Frames */
        rxx_frm_ctl.s.ctl_drp = 1;
        /* Strip off the preamble */
        rxx_frm_ctl.s.pre_strp = 1;
        /*
         * This port is configured to send PREAMBLE+SFD to begin every
         * frame. GMX checks that the PREAMBLE is sent correctly.
         */
        rxx_frm_ctl.s.pre_chk = 1;
        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

        /* Enable the AGL block */
        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

        /* Configure the port duplex and enables */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        prtx_cfg.s.en = 1;
        p->last_duplex = 1;
        prtx_cfg.s.duplex = p->last_duplex;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        p->last_link = 0;
        netif_carrier_off(netdev);

        if (octeon_mgmt_init_phy(netdev)) {
                dev_err(p->dev, "Cannot initialize PHY.\n");
                goto err_nophy;
        }

        netif_wake_queue(netdev);
        napi_enable(&p->napi);

        return 0;
err_nophy:
        /* The IRQ was already requested above, release it on failure. */
        free_irq(p->irq, netdev);
err_noirq:
        octeon_mgmt_reset_hw(p);
        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);
err_nomem:
        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);
        return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        napi_disable(&p->napi);
        netif_stop_queue(netdev);

        if (p->phydev)
                phy_disconnect(p->phydev);

        netif_carrier_off(netdev);

        octeon_mgmt_reset_hw(p);

        free_irq(p->irq, netdev);

        /* dma_unmap is a nop on Octeon, so just free everything. */
        skb_queue_purge(&p->tx_list);
        skb_queue_purge(&p->rx_list);

        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);

        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);

        return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union mgmt_port_ring_entry re;
        unsigned long flags;

        re.d64 = 0;
        re.s.len = skb->len;
        re.s.addr = dma_map_single(p->dev, skb->data,
                                   skb->len,
                                   DMA_TO_DEVICE);

        spin_lock_irqsave(&p->tx_list.lock, flags);

        if (unlikely(p->tx_current_fill >=
                     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }

        __skb_queue_tail(&p->tx_list, skb);

        /* Put it in the ring. */
        p->tx_ring[p->tx_next] = re.d64;
        p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
        p->tx_current_fill++;

        spin_unlock_irqrestore(&p->tx_list.lock, flags);

        dma_sync_single_for_device(p->dev, p->tx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += skb->len;

        /* Ring the bell. */
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

        netdev->trans_start = jiffies;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_update_tx_stats(netdev);
        return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        octeon_mgmt_receive_packets(p, 16);
        octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
                                    struct ethtool_drvinfo *info)
{
        strncpy(info->driver, DRV_NAME, sizeof(info->driver));
        strncpy(info->version, DRV_VERSION, sizeof(info->version));
        strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
        info->n_stats = 0;
        info->testinfo_len = 0;
        info->regdump_len = 0;
        info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (p->phydev)
                return phy_ethtool_gset(p->phydev, cmd);

        return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (p->phydev)
                return phy_ethtool_sset(p->phydev, cmd);

        return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
        .get_drvinfo = octeon_mgmt_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_settings = octeon_mgmt_get_settings,
        .set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
        .ndo_open = octeon_mgmt_open,
        .ndo_stop = octeon_mgmt_stop,
        .ndo_start_xmit = octeon_mgmt_xmit,
        .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
        .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
        .ndo_set_mac_address = octeon_mgmt_set_mac_address,
        .ndo_do_ioctl = octeon_mgmt_ioctl,
        .ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

static int __init octeon_mgmt_probe(struct platform_device *pdev)
{
        struct resource *res_irq;
        struct net_device *netdev;
        struct octeon_mgmt *p;
        int i;

        netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
        if (netdev == NULL)
                return -ENOMEM;

        dev_set_drvdata(&pdev->dev, netdev);
        p = netdev_priv(netdev);
        netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                       OCTEON_MGMT_NAPI_WEIGHT);

        p->netdev = netdev;
        p->dev = &pdev->dev;

        p->port = pdev->id;
        snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res_irq)
                goto err;

        p->irq = res_irq->start;
        spin_lock_init(&p->lock);

        skb_queue_head_init(&p->tx_list);
        skb_queue_head_init(&p->rx_list);
        tasklet_init(&p->tx_clean_tasklet,
                     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

        netdev->netdev_ops = &octeon_mgmt_ops;
        netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

        /* The mgmt ports get the first N MACs. */
        for (i = 0; i < 6; i++)
                netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
        netdev->dev_addr[5] += p->port;
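        /*
         * Note (assumption): only the low byte is adjusted, so this
         * relies on the assigned MAC block not crossing a low-byte
         * carry boundary.
         */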

        if (p->port >= octeon_bootinfo->mac_addr_count)
                dev_err(&pdev->dev,
                        "Error %s: Using MAC outside of the assigned range: %pM\n",
                        netdev->name, netdev->dev_addr);

        if (register_netdev(netdev))
                goto err;

        dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
        return 0;
err:
        free_netdev(netdev);
        return -ENOENT;
}

static int __exit octeon_mgmt_remove(struct platform_device *pdev)
{
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);

        unregister_netdev(netdev);
        free_netdev(netdev);
        return 0;
}

static struct platform_driver octeon_mgmt_driver = {
        .driver = {
                .name = "octeon_mgmt",
                .owner = THIS_MODULE,
        },
        .probe = octeon_mgmt_probe,
        .remove = __exit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
        /* Force our mdiobus driver module to be loaded first. */
        octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
        platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);