/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
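/*
 * For example, because the sizes above are powers of two, the index
 * wrap "(i + 1) % OCTEON_MGMT_RX_RING_SIZE" can be compiled down to a
 * single AND, "(i + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1)", with no
 * divide instruction.
 */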

/* Allow room for the Ethernet header, plus 8 bytes for VLAN tag and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
        u64 d64;
        struct {
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
                /* Physical address of the buffer */
                u64 addr:40;
        } s;
};
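/*
 * Note: the first-declared bitfield above occupies the most-significant
 * bits of d64, which is how GCC lays out bitfields on the big-endian
 * Octeon (MIPS) cores this driver targets; reserved_62_63 thus covers
 * bits 63:62 and addr covers bits 39:0 of the hardware descriptor.
 */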

struct octeon_mgmt {
        struct net_device *netdev;
        int port;
        int irq;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll.  No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct phy_device *phydev;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}

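/*
 * Both rings are run at most (ring_size - 8) entries full;
 * ring_max_fill() below keeps the last eight slots of each ring unused.
 */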
static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding.  */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring.  */
                p->rx_ring[p->rx_next_fill] = re.d64;
                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell.  */
                cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
        }
}

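/*
 * Reclaim transmitted buffers.  ORCNT holds the number of packets the
 * hardware has finished sending; the loop below acknowledges one per
 * iteration by writing a count of 1 back to ORCNT, then unmaps and
 * frees the corresponding skb.
 */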
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        int port = p->port;
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        while (mix_orcnt.s.orcnt) {
                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                spin_lock_irqsave(&p->tx_list.lock, flags);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer.  */
                cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
        struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers.  */
        drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
        bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;

        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers.  */
        s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
        s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}
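/*
 * Note that the unmap length above is based on ETH_FRAME_LEN rather
 * than the MTU-derived size used when the buffer was mapped; since
 * dma_unmap_single() is a nop on Octeon (see the comment in
 * octeon_mgmt_stop() below), the mismatch appears to be harmless.
 */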

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
        int port = p->port;
        struct net_device *netdev = p->netdev;
        union cvmx_mixx_ircnt mix_ircnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *skb_new;
        union mgmt_port_ring_entry re2;
        int rc = 1;

        re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
        if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
                /* A good packet, send it up. */
                skb_put(skb, re.s.len);
good:
                skb->protocol = eth_type_trans(skb, netdev);
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += skb->len;
                netdev->last_rx = jiffies;
                netif_receive_skb(skb);
                rc = 0;
        } else if (re.s.code == RING_ENTRY_CODE_MORE) {
                /*
                 * Packet split across skbs.  This can happen if we
                 * increase the MTU.  Buffers that are already in the
                 * rx ring can then end up being too small.  As the rx
                 * ring is refilled, buffers sized for the new MTU
                 * will be used and we should go back to the normal
                 * non-split case.
                 */
                skb_put(skb, re.s.len);
                do {
                        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                        if (re2.s.code != RING_ENTRY_CODE_MORE
                            && re2.s.code != RING_ENTRY_CODE_DONE)
                                goto split_error;
                        skb_put(skb2, re2.s.len);
                        skb_new = skb_copy_expand(skb, 0, skb2->len,
                                                  GFP_ATOMIC);
                        if (!skb_new)
                                goto split_error;
                        if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                                          skb2->len))
                                goto split_error;
                        skb_put(skb_new, skb2->len);
                        dev_kfree_skb_any(skb);
                        dev_kfree_skb_any(skb2);
                        skb = skb_new;
                } while (re2.s.code == RING_ENTRY_CODE_MORE);
                goto good;
        } else {
                /* Some other error, discard it. */
                dev_kfree_skb_any(skb);
                /*
                 * Error statistics are accumulated in
                 * octeon_mgmt_update_rx_stats.
                 */
        }
        goto done;
split_error:
        /* Discard the whole mess. */
        dev_kfree_skb_any(skb);
        dev_kfree_skb_any(skb2);
        while (re2.s.code == RING_ENTRY_CODE_MORE) {
                re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                dev_kfree_skb_any(skb2);
        }
        netdev->stats.rx_errors++;

done:
        /* Tell the hardware we processed a packet. */
        mix_ircnt.u64 = 0;
        mix_ircnt.s.ircnt = 1;
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
        return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
        int port = p->port;
        unsigned int work_done = 0;
        union cvmx_mixx_ircnt mix_ircnt;
        int rc;

        mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        while (work_done < budget && mix_ircnt.s.ircnt) {
                rc = octeon_mgmt_receive_one(p);
                if (!rc)
                        work_done++;

                /* Check for more packets. */
                mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        }

        octeon_mgmt_rx_fill_ring(p->netdev);

        return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done = 0;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* We stopped because no more packets were available. */
                napi_complete(napi);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_mixx_bist mix_bist;
        union cvmx_agl_gmx_bist agl_gmx_bist;

        mix_ctl.u64 = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        do {
                mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        } while (mix_ctl.s.busy);
        mix_ctl.s.reset = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        cvmx_wait(64);

        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
        if (mix_bist.u64)
                dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
                         (unsigned long long)mix_bist.u64);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
                dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
                         (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
        u64 cam[6];
        u64 cam_mask;
        int cam_index;
};

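/*
 * Each of the six CAM words holds octet i of up to eight MAC addresses,
 * one byte per CAM entry: entry n's copy of address octet i lands in
 * bits [8n+7:8n] of cam[i], and cam_mask enables the populated entries.
 */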
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
                                      unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
        cs->cam_mask |= (1ULL << cs->cam_index);
        cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
        union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
        unsigned long flags;
        unsigned int prev_packet_enable;
        unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
        unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
        struct octeon_mgmt_cam_state cam_state;
        struct dev_addr_list *list;
        struct list_head *pos;
        int available_cam_entries;

        memset(&cam_state, 0, sizeof(cam_state));

        if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
                cam_mode = 0;
                available_cam_entries = 8;
        } else {
                /*
                 * One CAM entry for the primary address, leaves seven
                 * for the secondary addresses.
                 */
                available_cam_entries = 7 - netdev->dev_addrs.count;
        }

        if (netdev->flags & IFF_MULTICAST) {
                if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
                    netdev_mc_count(netdev) > available_cam_entries)
                        multicast_mode = 2; /* 2 - Accept all multicast. */
                else
                        multicast_mode = 0; /* 0 - Use CAM. */
        }

        if (cam_mode == 1) {
                /* Add primary address. */
                octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
                list_for_each(pos, &netdev->dev_addrs.list) {
                        struct netdev_hw_addr *hw_addr;
                        hw_addr = list_entry(pos, struct netdev_hw_addr, list);
                        octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
                }
        }
        if (multicast_mode == 0) {
                netdev_for_each_mc_addr(list, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
        }

        spin_lock_irqsave(&p->lock, flags);

        /* Disable packet I/O. */
        agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prev_packet_enable = agl_gmx_prtx.s.en;
        agl_gmx_prtx.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

        adr_ctl.u64 = 0;
        adr_ctl.s.cam_mode = cam_mode;
        adr_ctl.s.mcst = multicast_mode;
        adr_ctl.s.bcst = 1;     /* Allow broadcast */

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

        /* Restore packet I/O. */
        agl_gmx_prtx.s.en = prev_packet_enable;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

        spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
        struct sockaddr *sa = addr;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

        octeon_mgmt_set_rx_filtering(netdev);

        return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

        /*
         * Limit the MTU to make sure the ethernet packets are between
         * 64 bytes and 16383 bytes.
         */
        if (size_without_fcs < 64 || size_without_fcs > 16383) {
                dev_warn(p->dev, "MTU must be between %d and %d.\n",
                         64 - OCTEON_MGMT_RX_HEADROOM,
                         16383 - OCTEON_MGMT_RX_HEADROOM);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;

        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
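        /* JABBER is programmed with the size rounded up to a multiple of 8. */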
        cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
                       (size_without_fcs + 7) & 0xfff8);

        return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port),
                       cvmx_read_csr(CVMX_MIXX_ISR(port)));
        cvmx_read_csr(CVMX_MIXX_ISR(port));
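        /*
         * Writing back the value just read is the usual
         * write-one-to-clear idiom; the extra read forces the write to
         * post before any new work is scheduled below.
         */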

        if (mixx_isr.s.irthresh) {
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
                             struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!netif_running(netdev))
                return -EINVAL;

        if (!p->phydev)
                return -EINVAL;

        return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        unsigned long flags;
        int link_changed = 0;

        spin_lock_irqsave(&p->lock, flags);
        if (p->phydev->link) {
                if (!p->last_link)
                        link_changed = 1;
                if (p->last_duplex != p->phydev->duplex) {
                        p->last_duplex = p->phydev->duplex;
                        prtx_cfg.u64 =
                                cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
                        prtx_cfg.s.duplex = p->phydev->duplex;
                        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
                                       prtx_cfg.u64);
                }
        } else {
                if (p->last_link)
                        link_changed = -1;
        }
        p->last_link = p->phydev->link;
        spin_unlock_irqrestore(&p->lock, flags);

        if (link_changed != 0) {
                if (link_changed > 0) {
                        netif_carrier_on(netdev);
                        pr_info("%s: Link is up - %d/%s\n", netdev->name,
                                p->phydev->speed,
                                DUPLEX_FULL == p->phydev->duplex ?
                                        "Full" : "Half");
                } else {
                        netif_carrier_off(netdev);
                        pr_info("%s: Link is down\n", netdev->name);
                }
        }
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        char phy_id[20];

        if (octeon_is_simulation()) {
                /* No PHYs in the simulator. */
                netif_carrier_on(netdev);
                return 0;
        }

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

        p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);

        if (IS_ERR(p->phydev)) {
                p->phydev = NULL;
                return -1;
        }

        phy_start_aneg(p->phydev);

        return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
        union cvmx_mixx_oring1 oring1;
        union cvmx_mixx_iring1 iring1;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        union cvmx_mixx_irhwm mix_irhwm;
        union cvmx_mixx_orhwm mix_orhwm;
        union cvmx_mixx_intena mix_intena;
        struct sockaddr sa;

        /* Allocate ring buffers. */
        p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->tx_ring)
                return -ENOMEM;
        p->tx_ring_handle =
                dma_map_single(p->dev, p->tx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                               DMA_BIDIRECTIONAL);
        p->tx_next = 0;
        p->tx_next_clean = 0;
        p->tx_current_fill = 0;

        p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->rx_ring)
                goto err_nomem;
        p->rx_ring_handle =
                dma_map_single(p->dev, p->rx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                               DMA_BIDIRECTIONAL);

        p->rx_next = 0;
        p->rx_next_fill = 0;
        p->rx_current_fill = 0;

        octeon_mgmt_reset_hw(p);

        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

        /* Bring it out of reset if needed. */
        if (mix_ctl.s.reset) {
                mix_ctl.s.reset = 0;
                cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
                do {
                        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
                } while (mix_ctl.s.reset);
        }

        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

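        /*
         * The obase/ibase fields take the DMA handle shifted right by
         * 3, i.e. the ring base in units of 8-byte ring entries.
         */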
        oring1.u64 = 0;
        oring1.s.obase = p->tx_ring_handle >> 3;
        oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

        iring1.u64 = 0;
        iring1.s.ibase = p->rx_ring_handle >> 3;
        iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

        /* Disable packet I/O. */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
        octeon_mgmt_set_mac_address(netdev, &sa);

        octeon_mgmt_change_mtu(netdev, netdev->mtu);

        /*
         * Enable the port HW.  Packets are not allowed to flow until
         * the AGL/GMX port itself is enabled further below.
         */
        mix_ctl.u64 = 0;
        mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
        mix_ctl.s.en = 1;           /* Enable the port */
        mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
        /* MII CB-request FIFO programmable high watermark */
        mix_ctl.s.mrq_hwm = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
                /*
                 * Force compensation values, as they are not
                 * determined properly by HW
                 */
                union cvmx_agl_gmx_drv_ctl drv_ctl;

                drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
                if (port) {
                        drv_ctl.s.byp_en1 = 1;
                        drv_ctl.s.nctl1 = 6;
                        drv_ctl.s.pctl1 = 6;
                } else {
                        drv_ctl.s.byp_en = 1;
                        drv_ctl.s.nctl = 6;
                        drv_ctl.s.pctl = 6;
                }
                cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
        }

        octeon_mgmt_rx_fill_ring(netdev);

        /* Clear statistics. */
        /* Clear on read. */
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

        cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

        if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
                        netdev)) {
                dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
                goto err_noirq;
        }

        /* Interrupt every single RX packet */
        mix_irhwm.u64 = 0;
        mix_irhwm.s.irhwm = 0;
        cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

        /* Interrupt when we have 5 or more packets to clean. */
        mix_orhwm.u64 = 0;
        mix_orhwm.s.orhwm = 5;
        cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

        /* Enable receive and transmit interrupts */
        mix_intena.u64 = 0;
        mix_intena.s.ithena = 1;
        mix_intena.s.othena = 1;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

        /* Enable packet I/O. */

        rxx_frm_ctl.u64 = 0;
        rxx_frm_ctl.s.pre_align = 1;
        /*
         * When set, disables the length check for non-min sized pkts
         * with padding in the client data.
         */
        rxx_frm_ctl.s.pad_len = 1;
        /* When set, disables the length check for VLAN pkts */
        rxx_frm_ctl.s.vlan_len = 1;
        /* When set, PREAMBLE checking is less strict */
        rxx_frm_ctl.s.pre_free = 1;
        /* Control Pause Frames can match station SMAC */
        rxx_frm_ctl.s.ctl_smac = 0;
        /* Control Pause Frames can match globally assign Multicast address */
        rxx_frm_ctl.s.ctl_mcst = 1;
        /* Forward pause information to TX block */
        rxx_frm_ctl.s.ctl_bck = 1;
        /* Drop Control Pause Frames */
        rxx_frm_ctl.s.ctl_drp = 1;
        /* Strip off the preamble */
        rxx_frm_ctl.s.pre_strp = 1;
        /*
         * This port is configured to send PREAMBLE+SFD to begin every
         * frame.  GMX checks that the PREAMBLE is sent correctly.
         */
        rxx_frm_ctl.s.pre_chk = 1;
        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

        /* Enable the AGL block */
        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

        /* Configure the port duplex and enables */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        prtx_cfg.s.en = 1;
        p->last_duplex = 1;
        prtx_cfg.s.duplex = p->last_duplex;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        p->last_link = 0;
        netif_carrier_off(netdev);

        if (octeon_mgmt_init_phy(netdev)) {
                dev_err(p->dev, "Cannot initialize PHY.\n");
                goto err_noirq;
        }

        netif_wake_queue(netdev);
        napi_enable(&p->napi);

        return 0;
err_noirq:
        octeon_mgmt_reset_hw(p);
        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);
err_nomem:
        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);
        return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        napi_disable(&p->napi);
        netif_stop_queue(netdev);

        if (p->phydev)
                phy_disconnect(p->phydev);

        netif_carrier_off(netdev);

        octeon_mgmt_reset_hw(p);

        free_irq(p->irq, netdev);

        /* dma_unmap is a nop on Octeon, so just free everything. */
        skb_queue_purge(&p->tx_list);
        skb_queue_purge(&p->rx_list);

        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);

        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);

        return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union mgmt_port_ring_entry re;
        unsigned long flags;

        re.d64 = 0;
        re.s.len = skb->len;
        re.s.addr = dma_map_single(p->dev, skb->data,
                                   skb->len,
                                   DMA_TO_DEVICE);

        spin_lock_irqsave(&p->tx_list.lock, flags);

        if (unlikely(p->tx_current_fill >=
                     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }

        __skb_queue_tail(&p->tx_list, skb);

        /* Put it in the ring. */
        p->tx_ring[p->tx_next] = re.d64;
        p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
        p->tx_current_fill++;

        spin_unlock_irqrestore(&p->tx_list.lock, flags);

        dma_sync_single_for_device(p->dev, p->tx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += skb->len;

        /* Ring the bell. */
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

        netdev->trans_start = jiffies;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_update_tx_stats(netdev);
        return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
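/*
 * Netpoll hook (used e.g. by netconsole): harvest up to 16 RX packets
 * without relying on interrupts.
 */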
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        octeon_mgmt_receive_packets(p, 16);
        octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
                                    struct ethtool_drvinfo *info)
{
        strncpy(info->driver, DRV_NAME, sizeof(info->driver));
        strncpy(info->version, DRV_VERSION, sizeof(info->version));
        strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
        info->n_stats = 0;
        info->testinfo_len = 0;
        info->regdump_len = 0;
        info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (p->phydev)
                return phy_ethtool_gset(p->phydev, cmd);

        return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (p->phydev)
                return phy_ethtool_sset(p->phydev, cmd);

        return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
        .get_drvinfo = octeon_mgmt_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_settings = octeon_mgmt_get_settings,
        .set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
        .ndo_open = octeon_mgmt_open,
        .ndo_stop = octeon_mgmt_stop,
        .ndo_start_xmit = octeon_mgmt_xmit,
        .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
        .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
        .ndo_set_mac_address = octeon_mgmt_set_mac_address,
        .ndo_do_ioctl = octeon_mgmt_ioctl,
        .ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

static int __init octeon_mgmt_probe(struct platform_device *pdev)
{
        struct resource *res_irq;
        struct net_device *netdev;
        struct octeon_mgmt *p;
        int i;

        netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
        if (netdev == NULL)
                return -ENOMEM;

        dev_set_drvdata(&pdev->dev, netdev);
        p = netdev_priv(netdev);
        netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                       OCTEON_MGMT_NAPI_WEIGHT);

        p->netdev = netdev;
        p->dev = &pdev->dev;

        p->port = pdev->id;
        snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res_irq)
                goto err;

        p->irq = res_irq->start;
        spin_lock_init(&p->lock);

        skb_queue_head_init(&p->tx_list);
        skb_queue_head_init(&p->rx_list);
        tasklet_init(&p->tx_clean_tasklet,
                     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

        netdev->netdev_ops = &octeon_mgmt_ops;
        netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

        /* The mgmt ports get the first N MACs. */
        for (i = 0; i < 6; i++)
                netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
        netdev->dev_addr[5] += p->port;

        if (p->port >= octeon_bootinfo->mac_addr_count)
                dev_err(&pdev->dev,
                        "Error %s: Using MAC outside of the assigned range: %pM\n",
                        netdev->name, netdev->dev_addr);

        if (register_netdev(netdev))
                goto err;

        dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
        return 0;
err:
        free_netdev(netdev);
        return -ENOENT;
}

static int __exit octeon_mgmt_remove(struct platform_device *pdev)
{
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);

        unregister_netdev(netdev);
        free_netdev(netdev);
        return 0;
}

static struct platform_driver octeon_mgmt_driver = {
        .driver = {
                .name = "octeon_mgmt",
                .owner = THIS_MODULE,
        },
        .probe = octeon_mgmt_probe,
        .remove = __exit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
        /* Force our mdiobus driver module to be loaded first. */
        octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
        platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);