/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>

#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1;         /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
                 "Freescale bitmapped debugging message enable value");

static void fs_set_multicast_list(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->set_multicast_list)(dev);
}

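/*
 * The receive path below follows the old-style NAPI convention used by
 * this kernel generation: dev->poll() is handed a budget, consumes at
 * most min(dev->quota, *budget) packets, decrements both counters by the
 * number received, and returns 1 while more work is pending or 0 once
 * the ring has been drained and receive interrupts may be re-enabled.
 */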
/* NAPI receive function */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;
        int rx_work_limit = 0;  /* pacify gcc */

        rx_work_limit = min(dev->quota, *budget);

        if (!netif_running(dev))
                return 0;

        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        /* clear RX status bits for napi */
        (*fep->ops->napi_clear_rx_event)(dev);

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_crc_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE);

                        skbn = skb;

                } else {

                        /* napi, got packet but no quota */
                        if (--rx_work_limit < 0)
                                break;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

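                        /*
                         * Receive copybreak: for short frames it is cheaper
                         * to copy the data into a small freshly allocated skb
                         * and hand the original full-sized ring buffer straight
                         * back to the controller than to pass the big buffer up
                         * the stack and allocate a new ENET_RX_FRSIZE one.
                         * Larger frames are passed up as-is and a replacement
                         * ring skb is allocated instead.
                         */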
                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        memcpy(skbn->data, skb->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                        if (skbn != NULL) {
                                skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_receive_skb(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        dev->quota -= received;
        *budget -= received;

        if (rx_work_limit < 0)
                return 1;       /* not done */

        /* done */
        netif_rx_complete(dev);

        (*fep->ops->napi_enable_rx)(dev);

        return 0;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        cbd_t *bdp;
        struct sk_buff *skb, *skbn, *skbt;
        int received = 0;
        u16 pkt_len, sc;
        int curidx;
        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

                curidx = bdp - fep->rx_bd_base;

                /*
                 * Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((sc & BD_ENET_RX_LAST) == 0)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s rcv is not +last\n",
                               dev->name);

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
                          BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        fep->stats.rx_errors++;
                        /* Frame too long or too short. */
                        if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
                                fep->stats.rx_length_errors++;
                        /* Frame alignment */
                        if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
                                fep->stats.rx_frame_errors++;
                        /* CRC Error */
                        if (sc & BD_ENET_RX_CR)
                                fep->stats.rx_crc_errors++;
                        /* FIFO overrun */
                        if (sc & BD_ENET_RX_OV)
                                fep->stats.rx_crc_errors++;

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE);

                        skbn = skb;

                } else {

                        skb = fep->rx_skbuff[curidx];

                        dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                         L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                         DMA_FROM_DEVICE);

                        /*
                         * Process the incoming frame.
                         */
                        fep->stats.rx_packets++;
                        pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
                        fep->stats.rx_bytes += pkt_len + 4;

                        if (pkt_len <= fpi->rx_copybreak) {
                                /* +2 to make IP header L1 cache aligned */
                                skbn = dev_alloc_skb(pkt_len + 2);
                                if (skbn != NULL) {
                                        skb_reserve(skbn, 2);   /* align IP header */
                                        memcpy(skbn->data, skb->data, pkt_len);
                                        /* swap */
                                        skbt = skb;
                                        skb = skbn;
                                        skbn = skbt;
                                }
                        } else
                                skbn = dev_alloc_skb(ENET_RX_FRSIZE);

                        if (skbn != NULL) {
                                skb->dev = dev;
                                skb_put(skb, pkt_len);  /* Make room */
                                skb->protocol = eth_type_trans(skb, dev);
                                received++;
                                netif_rx(skb);
                        } else {
                                printk(KERN_WARNING DRV_MODULE_NAME
                                       ": %s Memory squeeze, dropping packet.\n",
                                       dev->name);
                                fep->stats.rx_dropped++;
                                skbn = skb;
                        }
                }

                fep->rx_skbuff[curidx] = skbn;
                CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
                             L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                             DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

                /*
                 * Update BD pointer to next entry.
                 */
                if ((sc & BD_ENET_RX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->rx_bd_base;

                (*fep->ops->rx_bd_done)(dev);
        }

        fep->cur_rx = bdp;

        return 0;
}

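/*
 * Reclaim transmitted buffers.  Walk the TX ring from dirty_tx, freeing
 * every descriptor the controller has finished with (READY cleared),
 * accounting errors, and waking the queue if the ring had been full.
 */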
static void fs_enet_tx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        struct sk_buff *skb;
        int dirtyidx, do_wake, do_restart;
        u16 sc;

        spin_lock(&fep->lock);
        bdp = fep->dirty_tx;

        do_wake = do_restart = 0;
        while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

                dirtyidx = bdp - fep->tx_bd_base;

                if (fep->tx_free == fep->tx_ring)
                        break;

                skb = fep->tx_skbuff[dirtyidx];

                /*
                 * Check for errors.
                 */
                if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                          BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

                        if (sc & BD_ENET_TX_HB)         /* No heartbeat */
                                fep->stats.tx_heartbeat_errors++;
                        if (sc & BD_ENET_TX_LC)         /* Late collision */
                                fep->stats.tx_window_errors++;
                        if (sc & BD_ENET_TX_RL)         /* Retrans limit */
                                fep->stats.tx_aborted_errors++;
                        if (sc & BD_ENET_TX_UN)         /* Underrun */
                                fep->stats.tx_fifo_errors++;
                        if (sc & BD_ENET_TX_CSL)        /* Carrier lost */
                                fep->stats.tx_carrier_errors++;

                        if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
                                fep->stats.tx_errors++;
                                do_restart = 1;
                        }
                } else
                        fep->stats.tx_packets++;

                if (sc & BD_ENET_TX_READY)
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s HEY! Enet xmit interrupt and TX_READY.\n",
                               dev->name);

                /*
                 * Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (sc & BD_ENET_TX_DEF)
                        fep->stats.collisions++;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                 skb->len, DMA_TO_DEVICE);

                /*
                 * Free the sk buffer associated with this last transmit.
                 */
                dev_kfree_skb_irq(skb);
                fep->tx_skbuff[dirtyidx] = NULL;

                /*
                 * Update pointer to next buffer descriptor to be transmitted.
                 */
                if ((sc & BD_ENET_TX_WRAP) == 0)
                        bdp++;
                else
                        bdp = fep->tx_bd_base;

                /*
                 * Since we have freed up a buffer, the ring is no longer
                 * full.
                 */
                if (!fep->tx_free++)
                        do_wake = 1;
        }

        fep->dirty_tx = bdp;

        if (do_restart)
                (*fep->ops->tx_restart)(dev);

        spin_unlock(&fep->lock);

        if (do_wake)
                netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
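/*
 * Event bits are read and acknowledged one batch at a time.  In NAPI mode
 * the RX event is deliberately left out of the general acknowledge: RX
 * interrupts are masked, the RX event is cleared separately and the poll
 * routine is scheduled to drain the ring.
 */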
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        u32 int_events;
        u32 int_clr_events;
        int nr, napi_ok;
        int handled;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        nr = 0;
        while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

                nr++;

                int_clr_events = int_events;
                if (fpi->use_napi)
                        int_clr_events &= ~fep->ev_napi_rx;

                (*fep->ops->clear_int_events)(dev, int_clr_events);

                if (int_events & fep->ev_err)
                        (*fep->ops->ev_error)(dev, int_events);

                if (int_events & fep->ev_rx) {
                        if (!fpi->use_napi)
                                fs_enet_rx_non_napi(dev);
                        else {
                                napi_ok = netif_rx_schedule_prep(dev);

                                (*fep->ops->napi_disable_rx)(dev);
                                (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

                                /* NOTE: it is possible for FCCs in NAPI mode */
                                /* to submit a spurious interrupt while in poll */
                                if (napi_ok)
                                        __netif_rx_schedule(dev);
                        }
                }

                if (int_events & fep->ev_tx)
                        fs_enet_tx(dev);
        }

        handled = nr > 0;
        return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        struct sk_buff *skb;
        int i;

        fs_cleanup_bds(dev);

        fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
        fep->tx_free = fep->tx_ring;
        fep->cur_rx = fep->rx_bd_base;

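        /*
         * Both rings live in one contiguous block of descriptors (RX first,
         * then TX); the last descriptor of each ring carries BD_SC_WRAP so
         * the controller wraps back to the ring base.
         */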
        /*
         * Initialize the receive buffer descriptors.
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                skb = dev_alloc_skb(ENET_RX_FRSIZE);
                if (skb == NULL) {
                        printk(KERN_WARNING DRV_MODULE_NAME
                               ": %s Memory squeeze, unable to allocate skb\n",
                               dev->name);
                        break;
                }
                fep->rx_skbuff[i] = skb;
                skb->dev = dev;
                CBDW_BUFADDR(bdp,
                             dma_map_single(fep->dev, skb->data,
                                            L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                            DMA_FROM_DEVICE));
                CBDW_DATLEN(bdp, 0);    /* zero */
                CBDW_SC(bdp, BD_ENET_RX_EMPTY |
                        ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
        }
        /*
         * if we failed, fillup remainder
         */
        for (; i < fep->rx_ring; i++, bdp++) {
                fep->rx_skbuff[i] = NULL;
                CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
        }

        /*
         * ...and the same for transmit.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                fep->tx_skbuff[i] = NULL;
                CBDW_BUFADDR(bdp, 0);
                CBDW_DATLEN(bdp, 0);
                CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
        }
}

void fs_cleanup_bds(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct sk_buff *skb;
        cbd_t *bdp;
        int i;

        /*
         * Reset SKB transmit buffers.
         */
        for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
                if ((skb = fep->tx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                 skb->len, DMA_TO_DEVICE);

                fep->tx_skbuff[i] = NULL;
                dev_kfree_skb(skb);
        }

        /*
         * Reset SKB receive buffers
         */
        for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
                if ((skb = fep->rx_skbuff[i]) == NULL)
                        continue;

                /* unmap */
                dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
                                 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
                                 DMA_FROM_DEVICE);

                fep->rx_skbuff[i] = NULL;

                dev_kfree_skb(skb);
        }
}

/**********************************************************************************/

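/*
 * Queue a frame for transmission.  The descriptor is filled in completely
 * before the READY bit is set in its status word, so the controller never
 * sees a half-built entry; tx_kickstart then tells the MAC to (re)start
 * transmission.
 */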
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        cbd_t *bdp;
        int curidx;
        u16 sc;
        unsigned long flags;

        spin_lock_irqsave(&fep->tx_lock, flags);

        /*
         * Fill in a Tx ring entry
         */
        bdp = fep->cur_tx;

        if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&fep->tx_lock, flags);

                /*
                 * Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since the tx queue should be stopped.
                 */
                printk(KERN_WARNING DRV_MODULE_NAME
                       ": %s tx queue full!\n", dev->name);
                return NETDEV_TX_BUSY;
        }

        curidx = bdp - fep->tx_bd_base;
        /*
         * Clear all of the status flags.
         */
        CBDC_SC(bdp, BD_ENET_TX_STATS);

        /*
         * Save skb pointer.
         */
        fep->tx_skbuff[curidx] = skb;

        fep->stats.tx_bytes += skb->len;

        /*
         * Push the data cache so the CPM does not get stale memory data.
         */
        CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
                     skb->data, skb->len, DMA_TO_DEVICE));
        CBDW_DATLEN(bdp, skb->len);

        dev->trans_start = jiffies;

        /*
         * If this was the last BD in the ring, start at the beginning again.
         */
        if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
                fep->cur_tx++;
        else
                fep->cur_tx = fep->tx_bd_base;

        if (!--fep->tx_free)
                netif_stop_queue(dev);

        /* Trigger transmission start */
        sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
             BD_ENET_TX_LAST | BD_ENET_TX_TC;

        /* note that while FEC does not have this bit
         * it marks it as available for software use
         * yay for hw reuse :) */
        if (skb->len <= 60)
                sc |= BD_ENET_TX_PAD;
        CBDS_SC(bdp, sc);

        (*fep->ops->tx_kickstart)(dev);

        spin_unlock_irqrestore(&fep->tx_lock, flags);

        return NETDEV_TX_OK;
}

static int fs_request_irq(struct net_device *dev, int irq, const char *name,
                          irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
{
        struct fs_enet_private *fep = netdev_priv(dev);

        (*fep->ops->pre_request_irq)(dev, irq);
        return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        free_irq(irq, dev);
        (*fep->ops->post_free_irq)(dev, irq);
}

/**********************************************************************************/

/* This interrupt occurs when the PHY detects a link change. */
static irqreturn_t
fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;

        fep = netdev_priv(dev);
        fpi = fep->fpi;

        /*
         * Acknowledge the interrupt if possible.  If we have not
         * found the PHY yet we can't process or acknowledge the
         * interrupt now.  Instead we ignore this interrupt for now,
         * which we can do since it is edge triggered.  It will be
         * acknowledged later by fs_enet_open().
         */
        if (!fep->phy)
                return IRQ_NONE;

        fs_mii_ack_int(dev);
        fs_mii_link_status_change_check(dev, 0);

        return IRQ_HANDLED;
}

static void fs_timeout(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int wake = 0;

        fep->stats.tx_errors++;

        spin_lock_irqsave(&fep->lock, flags);

        if (dev->flags & IFF_UP) {
                (*fep->ops->stop)(dev);
                (*fep->ops->restart)(dev);
        }

        wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (wake)
                netif_wake_queue(dev);
}

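/*
 * Bring the interface up: install the MAC interrupt handler, hook up the
 * optional PHY link-change interrupt (phy_irq != -1), start the MII layer
 * and check the link once so the carrier state is correct from the start.
 */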
static int fs_enet_open(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        int r;

        /* Install our interrupt handler. */
        r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s Could not allocate FEC IRQ!", dev->name);
                return -EINVAL;
        }

        /* Install our phy interrupt handler */
        if (fpi->phy_irq != -1) {

                r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
                if (r != 0) {
                        printk(KERN_ERR DRV_MODULE_NAME
                               ": %s Could not allocate PHY IRQ!", dev->name);
                        fs_free_irq(dev, fep->interrupt);
                        return -EINVAL;
                }
        }

        fs_mii_startup(dev);
        netif_carrier_off(dev);
        fs_mii_link_status_change_check(dev, 1);

        return 0;
}

static int fs_enet_close(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;
        unsigned long flags;

        netif_stop_queue(dev);
        netif_carrier_off(dev);
        fs_mii_shutdown(dev);

        spin_lock_irqsave(&fep->lock, flags);
        (*fep->ops->stop)(dev);
        spin_unlock_irqrestore(&fep->lock, flags);

        /* release any irqs */
        if (fpi->phy_irq != -1)
                fs_free_irq(dev, fpi->phy_irq);
        fs_free_irq(dev, fep->interrupt);

        return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
                           struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int r, len;

        len = regs->len;

        spin_lock_irqsave(&fep->lock, flags);
        r = (*fep->ops->get_regs)(dev, p, &len);
        spin_unlock_irqrestore(&fep->lock, flags);

        if (r == 0)
                regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&fep->lock, flags);
        rc = mii_ethtool_gset(&fep->mii_if, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);

        return rc;
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&fep->lock, flags);
        rc = mii_ethtool_sset(&fep->mii_if, cmd);
        spin_unlock_irqrestore(&fep->lock, flags);

        return rc;
}

static int fs_nway_reset(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return mii_nway_restart(&fep->mii_if);
}

static u32 fs_get_msglevel(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        fep->msg_enable = value;
}

static struct ethtool_ops fs_ethtool_ops = {
        .get_drvinfo    = fs_get_drvinfo,
        .get_regs_len   = fs_get_regs_len,
        .get_settings   = fs_get_settings,
        .set_settings   = fs_set_settings,
        .nway_reset     = fs_nway_reset,
        .get_link       = ethtool_op_get_link,
        .get_msglevel   = fs_get_msglevel,
        .set_msglevel   = fs_set_msglevel,
        .get_tx_csum    = ethtool_op_get_tx_csum,
        .set_tx_csum    = ethtool_op_set_tx_csum,       /* local! */
        .get_sg         = ethtool_op_get_sg,
        .set_sg         = ethtool_op_set_sg,
        .get_regs       = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
        unsigned long flags;
        int rc;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&fep->lock, flags);
        rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
        spin_unlock_irqrestore(&fep->lock, flags);
        return rc;
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

static struct net_device *fs_init_instance(struct device *dev,
                                           const struct fs_platform_info *fpi)
{
        struct net_device *ndev = NULL;
        struct fs_enet_private *fep = NULL;
        int privsize, i, r, err = 0, registered = 0;

        /* guard */
        if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
                return ERR_PTR(-EINVAL);

        privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
                                   (fpi->rx_ring + fpi->tx_ring));

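        /*
         * The private area is followed by the rx and tx sk_buff pointer
         * arrays; rx_skbuff and tx_skbuff are pointed into this tail once
         * the netdev has been allocated.
         */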
        ndev = alloc_etherdev(privsize);
        if (!ndev) {
                err = -ENOMEM;
                goto err;
        }
        SET_MODULE_OWNER(ndev);

        fep = netdev_priv(ndev);
        memset(fep, 0, privsize);       /* clear everything */

        fep->dev = dev;
        dev_set_drvdata(dev, ndev);
        fep->fpi = fpi;
        if (fpi->init_ioports)
                fpi->init_ioports();

#ifdef CONFIG_FS_ENET_HAS_FEC
        if (fs_get_fec_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
        if (fs_get_scc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
        if (fs_get_fcc_index(fpi->fs_no) >= 0)
                fep->ops = &fs_fcc_ops;
#endif

        if (fep->ops == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s No matching ops found (%d).\n",
                       ndev->name, fpi->fs_no);
                err = -EINVAL;
                goto err;
        }

        r = (*fep->ops->setup_data)(ndev);
        if (r != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s setup_data failed\n",
                       ndev->name);
                err = r;
                goto err;
        }

        /* point rx_skbuff, tx_skbuff */
        fep->rx_skbuff = (struct sk_buff **)&fep[1];
        fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

        /* init locks */
        spin_lock_init(&fep->lock);
        spin_lock_init(&fep->tx_lock);

        /*
         * Set the Ethernet address.
         */
        for (i = 0; i < 6; i++)
                ndev->dev_addr[i] = fpi->macaddr[i];

        r = (*fep->ops->allocate_bd)(ndev);

        if (fep->ring_base == NULL) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
                err = r;
                goto err;
        }

        /*
         * Set receive and transmit descriptor base.
         */
        fep->rx_bd_base = fep->ring_base;
        fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

        /* initialize ring size variables */
        fep->tx_ring = fpi->tx_ring;
        fep->rx_ring = fpi->rx_ring;

        /*
         * The FEC Ethernet specific entries in the device structure.
         */
        ndev->open = fs_enet_open;
        ndev->hard_start_xmit = fs_enet_start_xmit;
        ndev->tx_timeout = fs_timeout;
        ndev->watchdog_timeo = 2 * HZ;
        ndev->stop = fs_enet_close;
        ndev->get_stats = fs_enet_get_stats;
        ndev->set_multicast_list = fs_set_multicast_list;
        if (fpi->use_napi) {
                ndev->poll = fs_enet_rx_napi;
                ndev->weight = fpi->napi_weight;
        }
        ndev->ethtool_ops = &fs_ethtool_ops;
        ndev->do_ioctl = fs_ioctl;

        init_timer(&fep->phy_timer_list);

        netif_carrier_off(ndev);

        err = register_netdev(ndev);
        if (err != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s register_netdev failed.\n", ndev->name);
                goto err;
        }
        registered = 1;

        err = fs_mii_connect(ndev);
        if (err != 0) {
                printk(KERN_ERR DRV_MODULE_NAME
                       ": %s fs_mii_connect failed.\n", ndev->name);
                goto err;
        }

        return ndev;

err:
        if (ndev != NULL) {

                if (registered)
                        unregister_netdev(ndev);

                if (fep != NULL) {
                        (*fep->ops->free_bd)(ndev);
                        (*fep->ops->cleanup_data)(ndev);
                }

                free_netdev(ndev);
        }

        dev_set_drvdata(dev, NULL);

        return ERR_PTR(err);
}

static int fs_cleanup_instance(struct net_device *ndev)
{
        struct fs_enet_private *fep;
        const struct fs_platform_info *fpi;
        struct device *dev;

        if (ndev == NULL)
                return -EINVAL;

        fep = netdev_priv(ndev);
        if (fep == NULL)
                return -EINVAL;

        fpi = fep->fpi;

        fs_mii_disconnect(ndev);

        unregister_netdev(ndev);

        dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
                          fep->ring_base, fep->ring_mem_addr);

        /* reset it */
        (*fep->ops->cleanup_data)(ndev);

        dev = fep->dev;
        if (dev != NULL) {
                dev_set_drvdata(dev, NULL);
                fep->dev = NULL;
        }

        free_netdev(ndev);

        return 0;
}

/**************************************************************************************/

/* handy pointer to the immap */
void *fs_enet_immap = NULL;

static int setup_immap(void)
{
        phys_addr_t paddr = 0;
        unsigned long size = 0;

#ifdef CONFIG_CPM1
        paddr = IMAP_ADDR;
        size = 0x10000; /* map 64K */
#endif

#ifdef CONFIG_CPM2
        paddr = CPM_MAP_ADDR;
        size = 0x40000; /* map 256 K */
#endif
        fs_enet_immap = ioremap(paddr, size);
        if (fs_enet_immap == NULL)
                return -EBADF;  /* XXX ahem; maybe just BUG_ON? */

        return 0;
}

static void cleanup_immap(void)
{
        if (fs_enet_immap != NULL) {
                iounmap(fs_enet_immap);
                fs_enet_immap = NULL;
        }
}

/**************************************************************************************/

static int __devinit fs_enet_probe(struct device *dev)
{
        struct net_device *ndev;

        /* no fixup - no device */
        if (dev->platform_data == NULL) {
                printk(KERN_INFO "fs_enet: "
                       "probe called with no platform data; "
                       "remove unused devices\n");
                return -ENODEV;
        }

        ndev = fs_init_instance(dev, dev->platform_data);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
        return 0;
}

static int fs_enet_remove(struct device *dev)
{
        return fs_cleanup_instance(dev_get_drvdata(dev));
}

static struct device_driver fs_enet_fec_driver = {
        .name           = "fsl-cpm-fec",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
        .name           = "fsl-cpm-scc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
        .name           = "fsl-cpm-fcc",
        .bus            = &platform_bus_type,
        .probe          = fs_enet_probe,
        .remove         = fs_enet_remove,
#ifdef CONFIG_PM
/*      .suspend        = fs_enet_suspend,      TODO */
/*      .resume         = fs_enet_resume,       TODO */
#endif
};

static int __init fs_init(void)
{
        int r;

        printk(KERN_INFO "%s", version);

        r = setup_immap();
        if (r != 0)
                return r;

        r = driver_register(&fs_enet_fec_driver);
        if (r != 0)
                goto err;

        r = driver_register(&fs_enet_fcc_driver);
        if (r != 0)
                goto err;

        r = driver_register(&fs_enet_scc_driver);
        if (r != 0)
                goto err;

        return 0;
err:
        cleanup_immap();
        return r;
}

static void __exit fs_cleanup(void)
{
        driver_unregister(&fs_enet_fec_driver);
        driver_unregister(&fs_enet_fcc_driver);
        driver_unregister(&fs_enet_scc_driver);
        cleanup_immap();
}

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);