powerpc/4xx: Allow 4xx PCI bridge to be disabled via device tree
[linux-2.6-block.git] / drivers / net / ibm_newemac / core.c
CommitLineData
1d3bb996
DG
1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
17cf803a
BH
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
1d3bb996
DG
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27#include <linux/sched.h>
28#include <linux/string.h>
29#include <linux/errno.h>
30#include <linux/delay.h>
31#include <linux/types.h>
32#include <linux/pci.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/crc32.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#include <linux/bitops.h>
39#include <linux/workqueue.h>
283029d1 40#include <linux/of.h>
1d3bb996
DG
41
42#include <asm/processor.h>
43#include <asm/io.h>
44#include <asm/dma.h>
45#include <asm/uaccess.h>
0925ab5d
VB
46#include <asm/dcr.h>
47#include <asm/dcr-regs.h>
1d3bb996
DG
48
49#include "core.h"
50
51/*
52 * Lack of dma_unmap_???? calls is intentional.
53 *
54 * API-correct usage requires additional support state information to be
55 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
56 * EMAC design (e.g. TX buffer passed from network stack can be split into
57 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
58 * maintaining such information will add additional overhead.
59 * Current DMA API implementation for 4xx processors only ensures cache coherency
60 * and dma_unmap_???? routines are empty and are likely to stay this way.
61 * I decided to omit dma_unmap_??? calls because I don't want to add additional
62 * complexity just for the sake of following some abstract API, when it doesn't
63 * add any real benefit to the driver. I understand that this decision maybe
64 * controversial, but I really tried to make code API-correct and efficient
65 * at the same time and didn't come up with code I liked :(. --ebs
66 */
67
68#define DRV_NAME "emac"
69#define DRV_VERSION "3.54"
70#define DRV_DESC "PPC 4xx OCP EMAC driver"
71
72MODULE_DESCRIPTION(DRV_DESC);
73MODULE_AUTHOR
74 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
75MODULE_LICENSE("GPL");
76
77/*
78 * PPC64 doesn't (yet) have a cacheable_memcpy
79 */
80#ifdef CONFIG_PPC64
81#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
82#endif
83
84/* minimum number of free TX descriptors required to wake up TX process */
85#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86
87/* If packet size is less than this number, we allocate small skb and copy packet
88 * contents into it instead of just sending original big skb up
89 */
90#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
91
92/* Since multiple EMACs share MDIO lines in various ways, we need
93 * to avoid re-using the same PHY ID in cases where the arch didn't
94 * setup precise phy_map entries
95 *
96 * XXX This is something that needs to be reworked as we can have multiple
97 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
98 * probably require in that case to have explicit PHY IDs in the device-tree
99 */
100static u32 busy_phy_map;
101static DEFINE_MUTEX(emac_phy_map_lock);
102
103/* This is the wait queue used to wait on any event related to probe, that
104 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105 */
106static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107
108/* Having stable interface names is a doomed idea. However, it would be nice
109 * if we didn't have completely random interface names at boot too :-) It's
110 * just a matter of making everybody's life easier. Since we are doing
111 * threaded probing, it's a bit harder though. The base idea here is that
112 * we make up a list of all emacs in the device-tree before we register the
113 * driver. Every emac will then wait for the previous one in the list to
114 * initialize before itself. We should also keep that list ordered by
115 * cell_index.
116 * That list is only 4 entries long, meaning that additional EMACs don't
117 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
118 */
119
120#define EMAC_BOOT_LIST_SIZE 4
121static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122
123/* How long should I wait for dependent devices ? */
124#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
125
126/* I don't want to litter system log with timeout errors
127 * when we have brain-damaged PHY.
128 */
129static inline void emac_report_timeout_error(struct emac_instance *dev,
130 const char *error)
131{
11121e30
VB
132 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
1d3bb996
DG
136 printk(KERN_ERR "%s: %s\n", dev->ndev->name, error);
137}
138
11121e30
VB
139/* EMAC PHY clock workaround:
140 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
141 * which allows controlling each EMAC clock
142 */
143static inline void emac_rx_clk_tx(struct emac_instance *dev)
144{
145#ifdef CONFIG_PPC_DCR_NATIVE
146 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
147 dcri_clrset(SDR0, SDR0_MFR,
148 0, SDR0_MFR_ECS >> dev->cell_index);
149#endif
150}
151
152static inline void emac_rx_clk_default(struct emac_instance *dev)
153{
154#ifdef CONFIG_PPC_DCR_NATIVE
155 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
156 dcri_clrset(SDR0, SDR0_MFR,
157 SDR0_MFR_ECS >> dev->cell_index, 0);
158#endif
159}
160
1d3bb996
DG
161/* PHY polling intervals */
162#define PHY_POLL_LINK_ON HZ
163#define PHY_POLL_LINK_OFF (HZ / 5)
164
165/* Graceful stop timeouts in us.
166 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
167 */
168#define STOP_TIMEOUT_10 1230
169#define STOP_TIMEOUT_100 124
170#define STOP_TIMEOUT_1000 13
171#define STOP_TIMEOUT_1000_JUMBO 73
172
4373c932
PB
173static unsigned char default_mcast_addr[] = {
174 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
175};
176
1d3bb996
DG
177/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
178static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
179 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
180 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
181 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
182 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
183 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
184 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
185 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
186 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
187 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
188 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
189 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
190 "tx_bd_excessive_collisions", "tx_bd_late_collision",
191 "tx_bd_multple_collisions", "tx_bd_single_collision",
192 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
193 "tx_errors"
194};
195
196static irqreturn_t emac_irq(int irq, void *dev_instance);
197static void emac_clean_tx_ring(struct emac_instance *dev);
198static void __emac_set_multicast_list(struct emac_instance *dev);
199
200static inline int emac_phy_supports_gige(int phy_mode)
201{
202 return phy_mode == PHY_MODE_GMII ||
203 phy_mode == PHY_MODE_RGMII ||
204 phy_mode == PHY_MODE_TBI ||
205 phy_mode == PHY_MODE_RTBI;
206}
207
208static inline int emac_phy_gpcs(int phy_mode)
209{
210 return phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
212}
213
214static inline void emac_tx_enable(struct emac_instance *dev)
215{
216 struct emac_regs __iomem *p = dev->emacp;
217 u32 r;
218
219 DBG(dev, "tx_enable" NL);
220
221 r = in_be32(&p->mr0);
222 if (!(r & EMAC_MR0_TXE))
223 out_be32(&p->mr0, r | EMAC_MR0_TXE);
224}
225
226static void emac_tx_disable(struct emac_instance *dev)
227{
228 struct emac_regs __iomem *p = dev->emacp;
229 u32 r;
230
231 DBG(dev, "tx_disable" NL);
232
233 r = in_be32(&p->mr0);
234 if (r & EMAC_MR0_TXE) {
235 int n = dev->stop_timeout;
236 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
237 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
238 udelay(1);
239 --n;
240 }
241 if (unlikely(!n))
242 emac_report_timeout_error(dev, "TX disable timeout");
243 }
244}
245
246static void emac_rx_enable(struct emac_instance *dev)
247{
248 struct emac_regs __iomem *p = dev->emacp;
249 u32 r;
250
251 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
252 goto out;
253
254 DBG(dev, "rx_enable" NL);
255
256 r = in_be32(&p->mr0);
257 if (!(r & EMAC_MR0_RXE)) {
258 if (unlikely(!(r & EMAC_MR0_RXI))) {
259 /* Wait if previous async disable is still in progress */
260 int n = dev->stop_timeout;
261 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
262 udelay(1);
263 --n;
264 }
265 if (unlikely(!n))
266 emac_report_timeout_error(dev,
267 "RX disable timeout");
268 }
269 out_be32(&p->mr0, r | EMAC_MR0_RXE);
270 }
271 out:
272 ;
273}
274
275static void emac_rx_disable(struct emac_instance *dev)
276{
277 struct emac_regs __iomem *p = dev->emacp;
278 u32 r;
279
280 DBG(dev, "rx_disable" NL);
281
282 r = in_be32(&p->mr0);
283 if (r & EMAC_MR0_RXE) {
284 int n = dev->stop_timeout;
285 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
286 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
287 udelay(1);
288 --n;
289 }
290 if (unlikely(!n))
291 emac_report_timeout_error(dev, "RX disable timeout");
292 }
293}
294
295static inline void emac_netif_stop(struct emac_instance *dev)
296{
297 netif_tx_lock_bh(dev->ndev);
e308a5d8 298 netif_addr_lock(dev->ndev);
1d3bb996 299 dev->no_mcast = 1;
e308a5d8 300 netif_addr_unlock(dev->ndev);
1d3bb996
DG
301 netif_tx_unlock_bh(dev->ndev);
302 dev->ndev->trans_start = jiffies; /* prevent tx timeout */
303 mal_poll_disable(dev->mal, &dev->commac);
304 netif_tx_disable(dev->ndev);
305}
306
307static inline void emac_netif_start(struct emac_instance *dev)
308{
309 netif_tx_lock_bh(dev->ndev);
e308a5d8 310 netif_addr_lock(dev->ndev);
1d3bb996
DG
311 dev->no_mcast = 0;
312 if (dev->mcast_pending && netif_running(dev->ndev))
313 __emac_set_multicast_list(dev);
e308a5d8 314 netif_addr_unlock(dev->ndev);
1d3bb996
DG
315 netif_tx_unlock_bh(dev->ndev);
316
317 netif_wake_queue(dev->ndev);
318
319 /* NOTE: unconditional netif_wake_queue is only appropriate
320 * so long as all callers are assured to have free tx slots
321 * (taken from tg3... though the case where that is wrong is
322 * not terribly harmful)
323 */
324 mal_poll_enable(dev->mal, &dev->commac);
325}
326
327static inline void emac_rx_disable_async(struct emac_instance *dev)
328{
329 struct emac_regs __iomem *p = dev->emacp;
330 u32 r;
331
332 DBG(dev, "rx_disable_async" NL);
333
334 r = in_be32(&p->mr0);
335 if (r & EMAC_MR0_RXE)
336 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
337}
338
339static int emac_reset(struct emac_instance *dev)
340{
341 struct emac_regs __iomem *p = dev->emacp;
342 int n = 20;
343
344 DBG(dev, "reset" NL);
345
346 if (!dev->reset_failed) {
347 /* 40x erratum suggests stopping RX channel before reset,
348 * we stop TX as well
349 */
350 emac_rx_disable(dev);
351 emac_tx_disable(dev);
352 }
353
354 out_be32(&p->mr0, EMAC_MR0_SRST);
355 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
356 --n;
357
358 if (n) {
359 dev->reset_failed = 0;
360 return 0;
361 } else {
362 emac_report_timeout_error(dev, "reset timeout");
363 dev->reset_failed = 1;
364 return -ETIMEDOUT;
365 }
366}
367
368static void emac_hash_mc(struct emac_instance *dev)
369{
05781ccd
GE
370 const int regs = EMAC_XAHT_REGS(dev);
371 u32 *gaht_base = emac_gaht_base(dev);
372 u32 gaht_temp[regs];
1d3bb996 373 struct dev_mc_list *dmi;
05781ccd 374 int i;
1d3bb996
DG
375
376 DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
377
05781ccd
GE
378 memset(gaht_temp, 0, sizeof (gaht_temp));
379
1d3bb996 380 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
05781ccd 381 int slot, reg, mask;
1d3bb996
DG
382 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
383 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
384 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
385
05781ccd
GE
386 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
387 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
388 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
389
390 gaht_temp[reg] |= mask;
1d3bb996 391 }
05781ccd
GE
392
393 for (i = 0; i < regs; i++)
394 out_be32(gaht_base + i, gaht_temp[i]);
1d3bb996
DG
395}
396
397static inline u32 emac_iff2rmr(struct net_device *ndev)
398{
399 struct emac_instance *dev = netdev_priv(ndev);
400 u32 r;
401
402 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
403
404 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
405 r |= EMAC4_RMR_BASE;
406 else
407 r |= EMAC_RMR_BASE;
408
409 if (ndev->flags & IFF_PROMISC)
410 r |= EMAC_RMR_PME;
05781ccd
GE
411 else if (ndev->flags & IFF_ALLMULTI ||
412 (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
1d3bb996
DG
413 r |= EMAC_RMR_PMME;
414 else if (ndev->mc_count > 0)
415 r |= EMAC_RMR_MAE;
416
417 return r;
418}
419
420static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
421{
422 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
423
424 DBG2(dev, "__emac_calc_base_mr1" NL);
425
426 switch(tx_size) {
427 case 2048:
428 ret |= EMAC_MR1_TFS_2K;
429 break;
430 default:
431 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
432 dev->ndev->name, tx_size);
433 }
434
435 switch(rx_size) {
436 case 16384:
437 ret |= EMAC_MR1_RFS_16K;
438 break;
439 case 4096:
440 ret |= EMAC_MR1_RFS_4K;
441 break;
442 default:
443 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
444 dev->ndev->name, rx_size);
445 }
446
447 return ret;
448}
449
450static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
451{
452 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
4696c3c4 453 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
1d3bb996
DG
454
455 DBG2(dev, "__emac4_calc_base_mr1" NL);
456
457 switch(tx_size) {
458 case 4096:
459 ret |= EMAC4_MR1_TFS_4K;
460 break;
461 case 2048:
462 ret |= EMAC4_MR1_TFS_2K;
463 break;
464 default:
465 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
466 dev->ndev->name, tx_size);
467 }
468
469 switch(rx_size) {
470 case 16384:
471 ret |= EMAC4_MR1_RFS_16K;
472 break;
473 case 4096:
474 ret |= EMAC4_MR1_RFS_4K;
475 break;
476 case 2048:
477 ret |= EMAC4_MR1_RFS_2K;
478 break;
479 default:
480 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
481 dev->ndev->name, rx_size);
482 }
483
484 return ret;
485}
486
487static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
488{
489 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
490 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
491 __emac_calc_base_mr1(dev, tx_size, rx_size);
492}
493
494static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
495{
496 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
497 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
498 else
499 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
500}
501
502static inline u32 emac_calc_rwmr(struct emac_instance *dev,
503 unsigned int low, unsigned int high)
504{
505 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
506 return (low << 22) | ( (high & 0x3ff) << 6);
507 else
508 return (low << 23) | ( (high & 0x1ff) << 7);
509}
510
511static int emac_configure(struct emac_instance *dev)
512{
513 struct emac_regs __iomem *p = dev->emacp;
514 struct net_device *ndev = dev->ndev;
911b237d 515 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
1d3bb996
DG
516 u32 r, mr1 = 0;
517
518 DBG(dev, "configure" NL);
519
911b237d
BH
520 if (!link) {
521 out_be32(&p->mr1, in_be32(&p->mr1)
522 | EMAC_MR1_FDE | EMAC_MR1_ILE);
523 udelay(100);
524 } else if (emac_reset(dev) < 0)
1d3bb996
DG
525 return -ETIMEDOUT;
526
527 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
528 tah_reset(dev->tah_dev);
529
911b237d
BH
530 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
531 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
1d3bb996
DG
532
533 /* Default fifo sizes */
534 tx_size = dev->tx_fifo_size;
535 rx_size = dev->rx_fifo_size;
536
911b237d
BH
537 /* No link, force loopback */
538 if (!link)
539 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
540
1d3bb996 541 /* Check for full duplex */
911b237d 542 else if (dev->phy.duplex == DUPLEX_FULL)
1d3bb996
DG
543 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
544
545 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
546 dev->stop_timeout = STOP_TIMEOUT_10;
547 switch (dev->phy.speed) {
548 case SPEED_1000:
549 if (emac_phy_gpcs(dev->phy.mode)) {
550 mr1 |= EMAC_MR1_MF_1000GPCS |
551 EMAC_MR1_MF_IPPA(dev->phy.address);
552
553 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
554 * identify this GPCS PHY later.
555 */
05781ccd 556 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
1d3bb996
DG
557 } else
558 mr1 |= EMAC_MR1_MF_1000;
559
560 /* Extended fifo sizes */
561 tx_size = dev->tx_fifo_size_gige;
562 rx_size = dev->rx_fifo_size_gige;
563
564 if (dev->ndev->mtu > ETH_DATA_LEN) {
f34ebab6
SR
565 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
566 mr1 |= EMAC4_MR1_JPSM;
567 else
568 mr1 |= EMAC_MR1_JPSM;
1d3bb996
DG
569 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
570 } else
571 dev->stop_timeout = STOP_TIMEOUT_1000;
572 break;
573 case SPEED_100:
574 mr1 |= EMAC_MR1_MF_100;
575 dev->stop_timeout = STOP_TIMEOUT_100;
576 break;
577 default: /* make gcc happy */
578 break;
579 }
580
581 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
582 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
583 dev->phy.speed);
584 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
585 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
586
587 /* on 40x erratum forces us to NOT use integrated flow control,
588 * let's hope it works on 44x ;)
589 */
590 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
591 dev->phy.duplex == DUPLEX_FULL) {
592 if (dev->phy.pause)
593 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
594 else if (dev->phy.asym_pause)
595 mr1 |= EMAC_MR1_APP;
596 }
597
598 /* Add base settings & fifo sizes & program MR1 */
599 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
600 out_be32(&p->mr1, mr1);
601
602 /* Set individual MAC address */
603 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
604 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
605 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
606 ndev->dev_addr[5]);
607
608 /* VLAN Tag Protocol ID */
609 out_be32(&p->vtpid, 0x8100);
610
611 /* Receive mode register */
612 r = emac_iff2rmr(ndev);
613 if (r & EMAC_RMR_MAE)
614 emac_hash_mc(dev);
615 out_be32(&p->rmr, r);
616
617 /* FIFOs thresholds */
618 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
619 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
620 tx_size / 2 / dev->fifo_entry_size);
621 else
622 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
623 tx_size / 2 / dev->fifo_entry_size);
624 out_be32(&p->tmr1, r);
625 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
626
627 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
628 there should be still enough space in FIFO to allow the our link
629 partner time to process this frame and also time to send PAUSE
630 frame itself.
631
632 Here is the worst case scenario for the RX FIFO "headroom"
633 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
634
635 1) One maximum-length frame on TX 1522 bytes
636 2) One PAUSE frame time 64 bytes
637 3) PAUSE frame decode time allowance 64 bytes
638 4) One maximum-length frame on RX 1522 bytes
639 5) Round-trip propagation delay of the link (100Mb) 15 bytes
640 ----------
641 3187 bytes
642
643 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
644 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
645 */
646 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
647 rx_size / 4 / dev->fifo_entry_size);
648 out_be32(&p->rwmr, r);
649
650 /* Set PAUSE timer to the maximum */
651 out_be32(&p->ptr, 0xffff);
652
653 /* IRQ sources */
654 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
655 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
656 EMAC_ISR_IRE | EMAC_ISR_TE;
657 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
658 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
659 EMAC4_ISR_RXOE | */;
660 out_be32(&p->iser, r);
661
662 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
663 if (emac_phy_gpcs(dev->phy.mode))
664 emac_mii_reset_phy(&dev->phy);
665
666 return 0;
667}
668
669static void emac_reinitialize(struct emac_instance *dev)
670{
671 DBG(dev, "reinitialize" NL);
672
673 emac_netif_stop(dev);
674 if (!emac_configure(dev)) {
675 emac_tx_enable(dev);
676 emac_rx_enable(dev);
677 }
678 emac_netif_start(dev);
679}
680
681static void emac_full_tx_reset(struct emac_instance *dev)
682{
683 DBG(dev, "full_tx_reset" NL);
684
685 emac_tx_disable(dev);
686 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
687 emac_clean_tx_ring(dev);
688 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
689
690 emac_configure(dev);
691
692 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
693 emac_tx_enable(dev);
694 emac_rx_enable(dev);
695}
696
697static void emac_reset_work(struct work_struct *work)
698{
699 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
700
701 DBG(dev, "reset_work" NL);
702
703 mutex_lock(&dev->link_lock);
61dbcece
BH
704 if (dev->opened) {
705 emac_netif_stop(dev);
706 emac_full_tx_reset(dev);
707 emac_netif_start(dev);
708 }
1d3bb996
DG
709 mutex_unlock(&dev->link_lock);
710}
711
712static void emac_tx_timeout(struct net_device *ndev)
713{
714 struct emac_instance *dev = netdev_priv(ndev);
715
716 DBG(dev, "tx_timeout" NL);
717
718 schedule_work(&dev->reset_work);
719}
720
721
722static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
723{
724 int done = !!(stacr & EMAC_STACR_OC);
725
726 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
727 done = !done;
728
729 return done;
730};
731
732static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
733{
734 struct emac_regs __iomem *p = dev->emacp;
735 u32 r = 0;
736 int n, err = -ETIMEDOUT;
737
738 mutex_lock(&dev->mdio_lock);
739
740 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
741
742 /* Enable proper MDIO port */
743 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
744 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
745 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
746 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
747
748 /* Wait for management interface to become idle */
cca87c18 749 n = 20;
1d3bb996
DG
750 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
751 udelay(1);
752 if (!--n) {
753 DBG2(dev, " -> timeout wait idle\n");
754 goto bail;
755 }
756 }
757
758 /* Issue read command */
759 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
760 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
761 else
762 r = EMAC_STACR_BASE(dev->opb_bus_freq);
763 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
764 r |= EMAC_STACR_OC;
bff713b5 765 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
1d3bb996
DG
766 r |= EMACX_STACR_STAC_READ;
767 else
768 r |= EMAC_STACR_STAC_READ;
769 r |= (reg & EMAC_STACR_PRA_MASK)
770 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
771 out_be32(&p->stacr, r);
772
773 /* Wait for read to complete */
cca87c18 774 n = 200;
1d3bb996
DG
775 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
776 udelay(1);
777 if (!--n) {
778 DBG2(dev, " -> timeout wait complete\n");
779 goto bail;
780 }
781 }
782
783 if (unlikely(r & EMAC_STACR_PHYE)) {
784 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
785 err = -EREMOTEIO;
786 goto bail;
787 }
788
789 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
790
791 DBG2(dev, "mdio_read -> %04x" NL, r);
792 err = 0;
793 bail:
794 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
795 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
796 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
797 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
798 mutex_unlock(&dev->mdio_lock);
799
800 return err == 0 ? r : err;
801}
802
803static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
804 u16 val)
805{
806 struct emac_regs __iomem *p = dev->emacp;
807 u32 r = 0;
808 int n, err = -ETIMEDOUT;
809
810 mutex_lock(&dev->mdio_lock);
811
812 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
813
814 /* Enable proper MDIO port */
815 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
816 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
817 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
818 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
819
820 /* Wait for management interface to be idle */
cca87c18 821 n = 20;
1d3bb996
DG
822 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
823 udelay(1);
824 if (!--n) {
825 DBG2(dev, " -> timeout wait idle\n");
826 goto bail;
827 }
828 }
829
830 /* Issue write command */
831 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
832 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
833 else
834 r = EMAC_STACR_BASE(dev->opb_bus_freq);
835 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
836 r |= EMAC_STACR_OC;
bff713b5 837 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
1d3bb996
DG
838 r |= EMACX_STACR_STAC_WRITE;
839 else
840 r |= EMAC_STACR_STAC_WRITE;
841 r |= (reg & EMAC_STACR_PRA_MASK) |
842 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
843 (val << EMAC_STACR_PHYD_SHIFT);
844 out_be32(&p->stacr, r);
845
846 /* Wait for write to complete */
cca87c18 847 n = 200;
1d3bb996
DG
848 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
849 udelay(1);
850 if (!--n) {
851 DBG2(dev, " -> timeout wait complete\n");
852 goto bail;
853 }
854 }
855 err = 0;
856 bail:
857 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
858 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
859 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
860 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
861 mutex_unlock(&dev->mdio_lock);
862}
863
864static int emac_mdio_read(struct net_device *ndev, int id, int reg)
865{
866 struct emac_instance *dev = netdev_priv(ndev);
867 int res;
868
869 res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev,
870 (u8) id, (u8) reg);
871 return res;
872}
873
874static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
875{
876 struct emac_instance *dev = netdev_priv(ndev);
877
878 __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev,
879 (u8) id, (u8) reg, (u16) val);
880}
881
882/* Tx lock BH */
883static void __emac_set_multicast_list(struct emac_instance *dev)
884{
885 struct emac_regs __iomem *p = dev->emacp;
886 u32 rmr = emac_iff2rmr(dev->ndev);
887
888 DBG(dev, "__multicast %08x" NL, rmr);
889
890 /* I decided to relax register access rules here to avoid
891 * full EMAC reset.
892 *
893 * There is a real problem with EMAC4 core if we use MWSW_001 bit
894 * in MR1 register and do a full EMAC reset.
895 * One TX BD status update is delayed and, after EMAC reset, it
896 * never happens, resulting in TX hung (it'll be recovered by TX
897 * timeout handler eventually, but this is just gross).
898 * So we either have to do full TX reset or try to cheat here :)
899 *
900 * The only required change is to RX mode register, so I *think* all
901 * we need is just to stop RX channel. This seems to work on all
902 * tested SoCs. --ebs
903 *
904 * If we need the full reset, we might just trigger the workqueue
905 * and do it async... a bit nasty but should work --BenH
906 */
907 dev->mcast_pending = 0;
908 emac_rx_disable(dev);
909 if (rmr & EMAC_RMR_MAE)
910 emac_hash_mc(dev);
911 out_be32(&p->rmr, rmr);
912 emac_rx_enable(dev);
913}
914
915/* Tx lock BH */
916static void emac_set_multicast_list(struct net_device *ndev)
917{
918 struct emac_instance *dev = netdev_priv(ndev);
919
920 DBG(dev, "multicast" NL);
921
922 BUG_ON(!netif_running(dev->ndev));
923
924 if (dev->no_mcast) {
925 dev->mcast_pending = 1;
926 return;
927 }
928 __emac_set_multicast_list(dev);
929}
930
931static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
932{
933 int rx_sync_size = emac_rx_sync_size(new_mtu);
934 int rx_skb_size = emac_rx_skb_size(new_mtu);
935 int i, ret = 0;
936
937 mutex_lock(&dev->link_lock);
938 emac_netif_stop(dev);
939 emac_rx_disable(dev);
940 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
941
942 if (dev->rx_sg_skb) {
943 ++dev->estats.rx_dropped_resize;
944 dev_kfree_skb(dev->rx_sg_skb);
945 dev->rx_sg_skb = NULL;
946 }
947
948 /* Make a first pass over RX ring and mark BDs ready, dropping
949 * non-processed packets on the way. We need this as a separate pass
950 * to simplify error recovery in the case of allocation failure later.
951 */
952 for (i = 0; i < NUM_RX_BUFF; ++i) {
953 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
954 ++dev->estats.rx_dropped_resize;
955
956 dev->rx_desc[i].data_len = 0;
957 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
958 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
959 }
960
961 /* Reallocate RX ring only if bigger skb buffers are required */
962 if (rx_skb_size <= dev->rx_skb_size)
963 goto skip;
964
965 /* Second pass, allocate new skbs */
966 for (i = 0; i < NUM_RX_BUFF; ++i) {
967 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
968 if (!skb) {
969 ret = -ENOMEM;
970 goto oom;
971 }
972
973 BUG_ON(!dev->rx_skb[i]);
974 dev_kfree_skb(dev->rx_skb[i]);
975
976 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
977 dev->rx_desc[i].data_ptr =
978 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
979 DMA_FROM_DEVICE) + 2;
980 dev->rx_skb[i] = skb;
981 }
982 skip:
983 /* Check if we need to change "Jumbo" bit in MR1 */
984 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
985 /* This is to prevent starting RX channel in emac_rx_enable() */
986 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
987
988 dev->ndev->mtu = new_mtu;
989 emac_full_tx_reset(dev);
990 }
991
992 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
993 oom:
994 /* Restart RX */
995 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
996 dev->rx_slot = 0;
997 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
998 emac_rx_enable(dev);
999 emac_netif_start(dev);
1000 mutex_unlock(&dev->link_lock);
1001
1002 return ret;
1003}
1004
1005/* Process ctx, rtnl_lock semaphore */
1006static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1007{
1008 struct emac_instance *dev = netdev_priv(ndev);
1009 int ret = 0;
1010
1011 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1012 return -EINVAL;
1013
1014 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1015
1016 if (netif_running(ndev)) {
1017 /* Check if we really need to reinitalize RX ring */
1018 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1019 ret = emac_resize_rx_ring(dev, new_mtu);
1020 }
1021
1022 if (!ret) {
1023 ndev->mtu = new_mtu;
1024 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1025 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1026 }
1027
1028 return ret;
1029}
1030
1031static void emac_clean_tx_ring(struct emac_instance *dev)
1032{
1033 int i;
1034
1035 for (i = 0; i < NUM_TX_BUFF; ++i) {
1036 if (dev->tx_skb[i]) {
1037 dev_kfree_skb(dev->tx_skb[i]);
1038 dev->tx_skb[i] = NULL;
1039 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1040 ++dev->estats.tx_dropped;
1041 }
1042 dev->tx_desc[i].ctrl = 0;
1043 dev->tx_desc[i].data_ptr = 0;
1044 }
1045}
1046
1047static void emac_clean_rx_ring(struct emac_instance *dev)
1048{
1049 int i;
1050
1051 for (i = 0; i < NUM_RX_BUFF; ++i)
1052 if (dev->rx_skb[i]) {
1053 dev->rx_desc[i].ctrl = 0;
1054 dev_kfree_skb(dev->rx_skb[i]);
1055 dev->rx_skb[i] = NULL;
1056 dev->rx_desc[i].data_ptr = 0;
1057 }
1058
1059 if (dev->rx_sg_skb) {
1060 dev_kfree_skb(dev->rx_sg_skb);
1061 dev->rx_sg_skb = NULL;
1062 }
1063}
1064
1065static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1066 gfp_t flags)
1067{
1068 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1069 if (unlikely(!skb))
1070 return -ENOMEM;
1071
1072 dev->rx_skb[slot] = skb;
1073 dev->rx_desc[slot].data_len = 0;
1074
1075 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1076 dev->rx_desc[slot].data_ptr =
1077 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1078 DMA_FROM_DEVICE) + 2;
1079 wmb();
1080 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1081 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1082
1083 return 0;
1084}
1085
1086static void emac_print_link_status(struct emac_instance *dev)
1087{
1088 if (netif_carrier_ok(dev->ndev))
1089 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1090 dev->ndev->name, dev->phy.speed,
1091 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1092 dev->phy.pause ? ", pause enabled" :
1093 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1094 else
1095 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1096}
1097
1098/* Process ctx, rtnl_lock semaphore */
1099static int emac_open(struct net_device *ndev)
1100{
1101 struct emac_instance *dev = netdev_priv(ndev);
1102 int err, i;
1103
1104 DBG(dev, "open" NL);
1105
1106 /* Setup error IRQ handler */
1107 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1108 if (err) {
1109 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1110 ndev->name, dev->emac_irq);
1111 return err;
1112 }
1113
1114 /* Allocate RX ring */
1115 for (i = 0; i < NUM_RX_BUFF; ++i)
1116 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1117 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1118 ndev->name);
1119 goto oom;
1120 }
1121
1122 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1123 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1124 dev->rx_sg_skb = NULL;
1125
1126 mutex_lock(&dev->link_lock);
61dbcece 1127 dev->opened = 1;
1d3bb996 1128
61dbcece 1129 /* Start PHY polling now.
1d3bb996
DG
1130 */
1131 if (dev->phy.address >= 0) {
1132 int link_poll_interval;
1133 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1134 dev->phy.def->ops->read_link(&dev->phy);
11121e30 1135 emac_rx_clk_default(dev);
1d3bb996
DG
1136 netif_carrier_on(dev->ndev);
1137 link_poll_interval = PHY_POLL_LINK_ON;
1138 } else {
11121e30 1139 emac_rx_clk_tx(dev);
1d3bb996
DG
1140 netif_carrier_off(dev->ndev);
1141 link_poll_interval = PHY_POLL_LINK_OFF;
1142 }
1143 dev->link_polling = 1;
1144 wmb();
1145 schedule_delayed_work(&dev->link_work, link_poll_interval);
1146 emac_print_link_status(dev);
1147 } else
1148 netif_carrier_on(dev->ndev);
1149
e8296582
BH
1150 /* Required for Pause packet support in EMAC */
1151 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
1152
1d3bb996
DG
1153 emac_configure(dev);
1154 mal_poll_add(dev->mal, &dev->commac);
1155 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1156 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1157 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1158 emac_tx_enable(dev);
1159 emac_rx_enable(dev);
1160 emac_netif_start(dev);
1161
1162 mutex_unlock(&dev->link_lock);
1163
1164 return 0;
1165 oom:
1166 emac_clean_rx_ring(dev);
1167 free_irq(dev->emac_irq, dev);
1168
1169 return -ENOMEM;
1170}
1171
1172/* BHs disabled */
1173#if 0
1174static int emac_link_differs(struct emac_instance *dev)
1175{
1176 u32 r = in_be32(&dev->emacp->mr1);
1177
1178 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1179 int speed, pause, asym_pause;
1180
1181 if (r & EMAC_MR1_MF_1000)
1182 speed = SPEED_1000;
1183 else if (r & EMAC_MR1_MF_100)
1184 speed = SPEED_100;
1185 else
1186 speed = SPEED_10;
1187
1188 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1189 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1190 pause = 1;
1191 asym_pause = 0;
1192 break;
1193 case EMAC_MR1_APP:
1194 pause = 0;
1195 asym_pause = 1;
1196 break;
1197 default:
1198 pause = asym_pause = 0;
1199 }
1200 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1201 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1202}
1203#endif
1204
1205static void emac_link_timer(struct work_struct *work)
1206{
1207 struct emac_instance *dev =
1208 container_of((struct delayed_work *)work,
1209 struct emac_instance, link_work);
1210 int link_poll_interval;
1211
1212 mutex_lock(&dev->link_lock);
1d3bb996
DG
1213 DBG2(dev, "link timer" NL);
1214
61dbcece
BH
1215 if (!dev->opened)
1216 goto bail;
1217
1d3bb996
DG
1218 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1219 if (!netif_carrier_ok(dev->ndev)) {
11121e30 1220 emac_rx_clk_default(dev);
1d3bb996
DG
1221 /* Get new link parameters */
1222 dev->phy.def->ops->read_link(&dev->phy);
1223
1224 netif_carrier_on(dev->ndev);
1225 emac_netif_stop(dev);
1226 emac_full_tx_reset(dev);
1227 emac_netif_start(dev);
1228 emac_print_link_status(dev);
1229 }
1230 link_poll_interval = PHY_POLL_LINK_ON;
1231 } else {
1232 if (netif_carrier_ok(dev->ndev)) {
11121e30 1233 emac_rx_clk_tx(dev);
1d3bb996
DG
1234 netif_carrier_off(dev->ndev);
1235 netif_tx_disable(dev->ndev);
911b237d 1236 emac_reinitialize(dev);
1d3bb996
DG
1237 emac_print_link_status(dev);
1238 }
1239 link_poll_interval = PHY_POLL_LINK_OFF;
1240 }
1241 schedule_delayed_work(&dev->link_work, link_poll_interval);
61dbcece 1242 bail:
1d3bb996
DG
1243 mutex_unlock(&dev->link_lock);
1244}
1245
1246static void emac_force_link_update(struct emac_instance *dev)
1247{
1248 netif_carrier_off(dev->ndev);
61dbcece 1249 smp_rmb();
1d3bb996
DG
1250 if (dev->link_polling) {
1251 cancel_rearming_delayed_work(&dev->link_work);
1252 if (dev->link_polling)
1253 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1254 }
1255}
1256
1257/* Process ctx, rtnl_lock semaphore */
1258static int emac_close(struct net_device *ndev)
1259{
1260 struct emac_instance *dev = netdev_priv(ndev);
1261
1262 DBG(dev, "close" NL);
1263
61dbcece
BH
1264 if (dev->phy.address >= 0) {
1265 dev->link_polling = 0;
1d3bb996 1266 cancel_rearming_delayed_work(&dev->link_work);
61dbcece
BH
1267 }
1268 mutex_lock(&dev->link_lock);
1d3bb996 1269 emac_netif_stop(dev);
61dbcece
BH
1270 dev->opened = 0;
1271 mutex_unlock(&dev->link_lock);
1d3bb996
DG
1272
1273 emac_rx_disable(dev);
1274 emac_tx_disable(dev);
1275 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1276 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1277 mal_poll_del(dev->mal, &dev->commac);
1278
1279 emac_clean_tx_ring(dev);
1280 emac_clean_rx_ring(dev);
1281
1282 free_irq(dev->emac_irq, dev);
1283
1284 return 0;
1285}
1286
1287static inline u16 emac_tx_csum(struct emac_instance *dev,
1288 struct sk_buff *skb)
1289{
e66f4168
VB
1290 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1291 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1d3bb996
DG
1292 ++dev->stats.tx_packets_csum;
1293 return EMAC_TX_CTRL_TAH_CSUM;
1294 }
1295 return 0;
1296}
1297
1298static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1299{
1300 struct emac_regs __iomem *p = dev->emacp;
1301 struct net_device *ndev = dev->ndev;
1302
1303 /* Send the packet out. If the if makes a significant perf
1304 * difference, then we can store the TMR0 value in "dev"
1305 * instead
1306 */
1307 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1d3bb996 1308 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
07c2c76e 1309 else
1310 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1d3bb996
DG
1311
1312 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1313 netif_stop_queue(ndev);
1314 DBG2(dev, "stopped TX queue" NL);
1315 }
1316
1317 ndev->trans_start = jiffies;
1318 ++dev->stats.tx_packets;
1319 dev->stats.tx_bytes += len;
1320
1321 return 0;
1322}
1323
1324/* Tx lock BH */
1325static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1326{
1327 struct emac_instance *dev = netdev_priv(ndev);
1328 unsigned int len = skb->len;
1329 int slot;
1330
1331 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1332 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1333
1334 slot = dev->tx_slot++;
1335 if (dev->tx_slot == NUM_TX_BUFF) {
1336 dev->tx_slot = 0;
1337 ctrl |= MAL_TX_CTRL_WRAP;
1338 }
1339
1340 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1341
1342 dev->tx_skb[slot] = skb;
1343 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1344 skb->data, len,
1345 DMA_TO_DEVICE);
1346 dev->tx_desc[slot].data_len = (u16) len;
1347 wmb();
1348 dev->tx_desc[slot].ctrl = ctrl;
1349
1350 return emac_xmit_finish(dev, len);
1351}
1352
1d3bb996
DG
1353static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1354 u32 pd, int len, int last, u16 base_ctrl)
1355{
1356 while (1) {
1357 u16 ctrl = base_ctrl;
1358 int chunk = min(len, MAL_MAX_TX_SIZE);
1359 len -= chunk;
1360
1361 slot = (slot + 1) % NUM_TX_BUFF;
1362
1363 if (last && !len)
1364 ctrl |= MAL_TX_CTRL_LAST;
1365 if (slot == NUM_TX_BUFF - 1)
1366 ctrl |= MAL_TX_CTRL_WRAP;
1367
1368 dev->tx_skb[slot] = NULL;
1369 dev->tx_desc[slot].data_ptr = pd;
1370 dev->tx_desc[slot].data_len = (u16) chunk;
1371 dev->tx_desc[slot].ctrl = ctrl;
1372 ++dev->tx_cnt;
1373
1374 if (!len)
1375 break;
1376
1377 pd += chunk;
1378 }
1379 return slot;
1380}
1381
1382/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1383static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1384{
1385 struct emac_instance *dev = netdev_priv(ndev);
1386 int nr_frags = skb_shinfo(skb)->nr_frags;
1387 int len = skb->len, chunk;
1388 int slot, i;
1389 u16 ctrl;
1390 u32 pd;
1391
1392 /* This is common "fast" path */
1393 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1394 return emac_start_xmit(skb, ndev);
1395
1396 len -= skb->data_len;
1397
1398 /* Note, this is only an *estimation*, we can still run out of empty
1399 * slots because of the additional fragmentation into
1400 * MAL_MAX_TX_SIZE-sized chunks
1401 */
1402 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1403 goto stop_queue;
1404
1405 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1406 emac_tx_csum(dev, skb);
1407 slot = dev->tx_slot;
1408
1409 /* skb data */
1410 dev->tx_skb[slot] = NULL;
1411 chunk = min(len, MAL_MAX_TX_SIZE);
1412 dev->tx_desc[slot].data_ptr = pd =
1413 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1414 dev->tx_desc[slot].data_len = (u16) chunk;
1415 len -= chunk;
1416 if (unlikely(len))
1417 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1418 ctrl);
1419 /* skb fragments */
1420 for (i = 0; i < nr_frags; ++i) {
1421 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1422 len = frag->size;
1423
1424 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1425 goto undo_frame;
1426
1427 pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
1428 DMA_TO_DEVICE);
1429
1430 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1431 ctrl);
1432 }
1433
1434 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1435
1436 /* Attach skb to the last slot so we don't release it too early */
1437 dev->tx_skb[slot] = skb;
1438
1439 /* Send the packet out */
1440 if (dev->tx_slot == NUM_TX_BUFF - 1)
1441 ctrl |= MAL_TX_CTRL_WRAP;
1442 wmb();
1443 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1444 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1445
1446 return emac_xmit_finish(dev, skb->len);
1447
1448 undo_frame:
1449 /* Well, too bad. Our previous estimation was overly optimistic.
1450 * Undo everything.
1451 */
1452 while (slot != dev->tx_slot) {
1453 dev->tx_desc[slot].ctrl = 0;
1454 --dev->tx_cnt;
1455 if (--slot < 0)
1456 slot = NUM_TX_BUFF - 1;
1457 }
1458 ++dev->estats.tx_undo;
1459
1460 stop_queue:
1461 netif_stop_queue(ndev);
1462 DBG2(dev, "stopped TX queue" NL);
1463 return 1;
1464}
1d3bb996
DG
1465
1466/* Tx lock BHs */
1467static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1468{
1469 struct emac_error_stats *st = &dev->estats;
1470
1471 DBG(dev, "BD TX error %04x" NL, ctrl);
1472
1473 ++st->tx_bd_errors;
1474 if (ctrl & EMAC_TX_ST_BFCS)
1475 ++st->tx_bd_bad_fcs;
1476 if (ctrl & EMAC_TX_ST_LCS)
1477 ++st->tx_bd_carrier_loss;
1478 if (ctrl & EMAC_TX_ST_ED)
1479 ++st->tx_bd_excessive_deferral;
1480 if (ctrl & EMAC_TX_ST_EC)
1481 ++st->tx_bd_excessive_collisions;
1482 if (ctrl & EMAC_TX_ST_LC)
1483 ++st->tx_bd_late_collision;
1484 if (ctrl & EMAC_TX_ST_MC)
1485 ++st->tx_bd_multple_collisions;
1486 if (ctrl & EMAC_TX_ST_SC)
1487 ++st->tx_bd_single_collision;
1488 if (ctrl & EMAC_TX_ST_UR)
1489 ++st->tx_bd_underrun;
1490 if (ctrl & EMAC_TX_ST_SQE)
1491 ++st->tx_bd_sqe;
1492}
1493
1494static void emac_poll_tx(void *param)
1495{
1496 struct emac_instance *dev = param;
1497 u32 bad_mask;
1498
1499 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
1500
1501 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1502 bad_mask = EMAC_IS_BAD_TX_TAH;
1503 else
1504 bad_mask = EMAC_IS_BAD_TX;
1505
1506 netif_tx_lock_bh(dev->ndev);
1507 if (dev->tx_cnt) {
1508 u16 ctrl;
1509 int slot = dev->ack_slot, n = 0;
1510 again:
1511 ctrl = dev->tx_desc[slot].ctrl;
1512 if (!(ctrl & MAL_TX_CTRL_READY)) {
1513 struct sk_buff *skb = dev->tx_skb[slot];
1514 ++n;
1515
1516 if (skb) {
1517 dev_kfree_skb(skb);
1518 dev->tx_skb[slot] = NULL;
1519 }
1520 slot = (slot + 1) % NUM_TX_BUFF;
1521
1522 if (unlikely(ctrl & bad_mask))
1523 emac_parse_tx_error(dev, ctrl);
1524
1525 if (--dev->tx_cnt)
1526 goto again;
1527 }
1528 if (n) {
1529 dev->ack_slot = slot;
1530 if (netif_queue_stopped(dev->ndev) &&
1531 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1532 netif_wake_queue(dev->ndev);
1533
1534 DBG2(dev, "tx %d pkts" NL, n);
1535 }
1536 }
1537 netif_tx_unlock_bh(dev->ndev);
1538}
1539
1540static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1541 int len)
1542{
1543 struct sk_buff *skb = dev->rx_skb[slot];
1544
1545 DBG2(dev, "recycle %d %d" NL, slot, len);
1546
1547 if (len)
1548 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1549 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1550
1551 dev->rx_desc[slot].data_len = 0;
1552 wmb();
1553 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1554 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1555}
1556
1557static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1558{
1559 struct emac_error_stats *st = &dev->estats;
1560
1561 DBG(dev, "BD RX error %04x" NL, ctrl);
1562
1563 ++st->rx_bd_errors;
1564 if (ctrl & EMAC_RX_ST_OE)
1565 ++st->rx_bd_overrun;
1566 if (ctrl & EMAC_RX_ST_BP)
1567 ++st->rx_bd_bad_packet;
1568 if (ctrl & EMAC_RX_ST_RP)
1569 ++st->rx_bd_runt_packet;
1570 if (ctrl & EMAC_RX_ST_SE)
1571 ++st->rx_bd_short_event;
1572 if (ctrl & EMAC_RX_ST_AE)
1573 ++st->rx_bd_alignment_error;
1574 if (ctrl & EMAC_RX_ST_BFCS)
1575 ++st->rx_bd_bad_fcs;
1576 if (ctrl & EMAC_RX_ST_PTL)
1577 ++st->rx_bd_packet_too_long;
1578 if (ctrl & EMAC_RX_ST_ORE)
1579 ++st->rx_bd_out_of_range;
1580 if (ctrl & EMAC_RX_ST_IRE)
1581 ++st->rx_bd_in_range;
1582}
1583
1584static inline void emac_rx_csum(struct emac_instance *dev,
1585 struct sk_buff *skb, u16 ctrl)
1586{
1587#ifdef CONFIG_IBM_NEW_EMAC_TAH
1588 if (!ctrl && dev->tah_dev) {
1589 skb->ip_summed = CHECKSUM_UNNECESSARY;
1590 ++dev->stats.rx_packets_csum;
1591 }
1592#endif
1593}
1594
1595static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1596{
1597 if (likely(dev->rx_sg_skb != NULL)) {
1598 int len = dev->rx_desc[slot].data_len;
1599 int tot_len = dev->rx_sg_skb->len + len;
1600
1601 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1602 ++dev->estats.rx_dropped_mtu;
1603 dev_kfree_skb(dev->rx_sg_skb);
1604 dev->rx_sg_skb = NULL;
1605 } else {
bef1bc95 1606 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
1d3bb996
DG
1607 dev->rx_skb[slot]->data, len);
1608 skb_put(dev->rx_sg_skb, len);
1609 emac_recycle_rx_skb(dev, slot, len);
1610 return 0;
1611 }
1612 }
1613 emac_recycle_rx_skb(dev, slot, 0);
1614 return -1;
1615}
1616
1617/* NAPI poll context */
1618static int emac_poll_rx(void *param, int budget)
1619{
1620 struct emac_instance *dev = param;
1621 int slot = dev->rx_slot, received = 0;
1622
1623 DBG2(dev, "poll_rx(%d)" NL, budget);
1624
1625 again:
1626 while (budget > 0) {
1627 int len;
1628 struct sk_buff *skb;
1629 u16 ctrl = dev->rx_desc[slot].ctrl;
1630
1631 if (ctrl & MAL_RX_CTRL_EMPTY)
1632 break;
1633
1634 skb = dev->rx_skb[slot];
1635 mb();
1636 len = dev->rx_desc[slot].data_len;
1637
1638 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1639 goto sg;
1640
1641 ctrl &= EMAC_BAD_RX_MASK;
1642 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1643 emac_parse_rx_error(dev, ctrl);
1644 ++dev->estats.rx_dropped_error;
1645 emac_recycle_rx_skb(dev, slot, 0);
1646 len = 0;
1647 goto next;
1648 }
6c688f42
SN
1649
1650 if (len < ETH_HLEN) {
1651 ++dev->estats.rx_dropped_stack;
1652 emac_recycle_rx_skb(dev, slot, len);
1653 goto next;
1654 }
1d3bb996
DG
1655
1656 if (len && len < EMAC_RX_COPY_THRESH) {
1657 struct sk_buff *copy_skb =
1658 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1659 if (unlikely(!copy_skb))
1660 goto oom;
1661
1662 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1663 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1664 len + 2);
1665 emac_recycle_rx_skb(dev, slot, len);
1666 skb = copy_skb;
1667 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1668 goto oom;
1669
1670 skb_put(skb, len);
1671 push_packet:
1672 skb->dev = dev->ndev;
1673 skb->protocol = eth_type_trans(skb, dev->ndev);
1674 emac_rx_csum(dev, skb, ctrl);
1675
1676 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1677 ++dev->estats.rx_dropped_stack;
1678 next:
1679 ++dev->stats.rx_packets;
1680 skip:
1681 dev->stats.rx_bytes += len;
1682 slot = (slot + 1) % NUM_RX_BUFF;
1683 --budget;
1684 ++received;
1685 continue;
1686 sg:
1687 if (ctrl & MAL_RX_CTRL_FIRST) {
1688 BUG_ON(dev->rx_sg_skb);
1689 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1690 DBG(dev, "rx OOM %d" NL, slot);
1691 ++dev->estats.rx_dropped_oom;
1692 emac_recycle_rx_skb(dev, slot, 0);
1693 } else {
1694 dev->rx_sg_skb = skb;
1695 skb_put(skb, len);
1696 }
1697 } else if (!emac_rx_sg_append(dev, slot) &&
1698 (ctrl & MAL_RX_CTRL_LAST)) {
1699
1700 skb = dev->rx_sg_skb;
1701 dev->rx_sg_skb = NULL;
1702
1703 ctrl &= EMAC_BAD_RX_MASK;
1704 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1705 emac_parse_rx_error(dev, ctrl);
1706 ++dev->estats.rx_dropped_error;
1707 dev_kfree_skb(skb);
1708 len = 0;
1709 } else
1710 goto push_packet;
1711 }
1712 goto skip;
1713 oom:
1714 DBG(dev, "rx OOM %d" NL, slot);
1715 /* Drop the packet and recycle skb */
1716 ++dev->estats.rx_dropped_oom;
1717 emac_recycle_rx_skb(dev, slot, 0);
1718 goto next;
1719 }
1720
1721 if (received) {
1722 DBG2(dev, "rx %d BDs" NL, received);
1723 dev->rx_slot = slot;
1724 }
1725
1726 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1727 mb();
1728 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1729 DBG2(dev, "rx restart" NL);
1730 received = 0;
1731 goto again;
1732 }
1733
1734 if (dev->rx_sg_skb) {
1735 DBG2(dev, "dropping partial rx packet" NL);
1736 ++dev->estats.rx_dropped_error;
1737 dev_kfree_skb(dev->rx_sg_skb);
1738 dev->rx_sg_skb = NULL;
1739 }
1740
1741 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1742 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1743 emac_rx_enable(dev);
1744 dev->rx_slot = 0;
1745 }
1746 return received;
1747}
1748
1749/* NAPI poll context */
1750static int emac_peek_rx(void *param)
1751{
1752 struct emac_instance *dev = param;
1753
1754 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1755}
1756
1757/* NAPI poll context */
1758static int emac_peek_rx_sg(void *param)
1759{
1760 struct emac_instance *dev = param;
1761
1762 int slot = dev->rx_slot;
1763 while (1) {
1764 u16 ctrl = dev->rx_desc[slot].ctrl;
1765 if (ctrl & MAL_RX_CTRL_EMPTY)
1766 return 0;
1767 else if (ctrl & MAL_RX_CTRL_LAST)
1768 return 1;
1769
1770 slot = (slot + 1) % NUM_RX_BUFF;
1771
1772 /* I'm just being paranoid here :) */
1773 if (unlikely(slot == dev->rx_slot))
1774 return 0;
1775 }
1776}
1777
1778/* Hard IRQ */
1779static void emac_rxde(void *param)
1780{
1781 struct emac_instance *dev = param;
1782
1783 ++dev->estats.rx_stopped;
1784 emac_rx_disable_async(dev);
1785}
1786
1787/* Hard IRQ */
1788static irqreturn_t emac_irq(int irq, void *dev_instance)
1789{
1790 struct emac_instance *dev = dev_instance;
1791 struct emac_regs __iomem *p = dev->emacp;
1792 struct emac_error_stats *st = &dev->estats;
1793 u32 isr;
1794
1795 spin_lock(&dev->lock);
1796
1797 isr = in_be32(&p->isr);
1798 out_be32(&p->isr, isr);
1799
1800 DBG(dev, "isr = %08x" NL, isr);
1801
1802 if (isr & EMAC4_ISR_TXPE)
1803 ++st->tx_parity;
1804 if (isr & EMAC4_ISR_RXPE)
1805 ++st->rx_parity;
1806 if (isr & EMAC4_ISR_TXUE)
1807 ++st->tx_underrun;
1808 if (isr & EMAC4_ISR_RXOE)
1809 ++st->rx_fifo_overrun;
1810 if (isr & EMAC_ISR_OVR)
1811 ++st->rx_overrun;
1812 if (isr & EMAC_ISR_BP)
1813 ++st->rx_bad_packet;
1814 if (isr & EMAC_ISR_RP)
1815 ++st->rx_runt_packet;
1816 if (isr & EMAC_ISR_SE)
1817 ++st->rx_short_event;
1818 if (isr & EMAC_ISR_ALE)
1819 ++st->rx_alignment_error;
1820 if (isr & EMAC_ISR_BFCS)
1821 ++st->rx_bad_fcs;
1822 if (isr & EMAC_ISR_PTLE)
1823 ++st->rx_packet_too_long;
1824 if (isr & EMAC_ISR_ORE)
1825 ++st->rx_out_of_range;
1826 if (isr & EMAC_ISR_IRE)
1827 ++st->rx_in_range;
1828 if (isr & EMAC_ISR_SQE)
1829 ++st->tx_sqe;
1830 if (isr & EMAC_ISR_TE)
1831 ++st->tx_errors;
1832
1833 spin_unlock(&dev->lock);
1834
1835 return IRQ_HANDLED;
1836}
1837
1838static struct net_device_stats *emac_stats(struct net_device *ndev)
1839{
1840 struct emac_instance *dev = netdev_priv(ndev);
1841 struct emac_stats *st = &dev->stats;
1842 struct emac_error_stats *est = &dev->estats;
1843 struct net_device_stats *nst = &dev->nstats;
1844 unsigned long flags;
1845
1846 DBG2(dev, "stats" NL);
1847
1848 /* Compute "legacy" statistics */
1849 spin_lock_irqsave(&dev->lock, flags);
1850 nst->rx_packets = (unsigned long)st->rx_packets;
1851 nst->rx_bytes = (unsigned long)st->rx_bytes;
1852 nst->tx_packets = (unsigned long)st->tx_packets;
1853 nst->tx_bytes = (unsigned long)st->tx_bytes;
1854 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1855 est->rx_dropped_error +
1856 est->rx_dropped_resize +
1857 est->rx_dropped_mtu);
1858 nst->tx_dropped = (unsigned long)est->tx_dropped;
1859
1860 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1861 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1862 est->rx_fifo_overrun +
1863 est->rx_overrun);
1864 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1865 est->rx_alignment_error);
1866 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1867 est->rx_bad_fcs);
1868 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1869 est->rx_bd_short_event +
1870 est->rx_bd_packet_too_long +
1871 est->rx_bd_out_of_range +
1872 est->rx_bd_in_range +
1873 est->rx_runt_packet +
1874 est->rx_short_event +
1875 est->rx_packet_too_long +
1876 est->rx_out_of_range +
1877 est->rx_in_range);
1878
1879 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1880 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1881 est->tx_underrun);
1882 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1883 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1884 est->tx_bd_excessive_collisions +
1885 est->tx_bd_late_collision +
1886 est->tx_bd_multple_collisions);
1887 spin_unlock_irqrestore(&dev->lock, flags);
1888 return nst;
1889}
1890
1891static struct mal_commac_ops emac_commac_ops = {
1892 .poll_tx = &emac_poll_tx,
1893 .poll_rx = &emac_poll_rx,
1894 .peek_rx = &emac_peek_rx,
1895 .rxde = &emac_rxde,
1896};
1897
1898static struct mal_commac_ops emac_commac_sg_ops = {
1899 .poll_tx = &emac_poll_tx,
1900 .poll_rx = &emac_poll_rx,
1901 .peek_rx = &emac_peek_rx_sg,
1902 .rxde = &emac_rxde,
1903};
1904
1905/* Ethtool support */
1906static int emac_ethtool_get_settings(struct net_device *ndev,
1907 struct ethtool_cmd *cmd)
1908{
1909 struct emac_instance *dev = netdev_priv(ndev);
1910
1911 cmd->supported = dev->phy.features;
1912 cmd->port = PORT_MII;
1913 cmd->phy_address = dev->phy.address;
1914 cmd->transceiver =
1915 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1916
1917 mutex_lock(&dev->link_lock);
1918 cmd->advertising = dev->phy.advertising;
1919 cmd->autoneg = dev->phy.autoneg;
1920 cmd->speed = dev->phy.speed;
1921 cmd->duplex = dev->phy.duplex;
1922 mutex_unlock(&dev->link_lock);
1923
1924 return 0;
1925}
1926
1927static int emac_ethtool_set_settings(struct net_device *ndev,
1928 struct ethtool_cmd *cmd)
1929{
1930 struct emac_instance *dev = netdev_priv(ndev);
1931 u32 f = dev->phy.features;
1932
1933 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1934 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1935
1936 /* Basic sanity checks */
1937 if (dev->phy.address < 0)
1938 return -EOPNOTSUPP;
1939 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1940 return -EINVAL;
1941 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1942 return -EINVAL;
1943 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1944 return -EINVAL;
1945
1946 if (cmd->autoneg == AUTONEG_DISABLE) {
1947 switch (cmd->speed) {
1948 case SPEED_10:
1949 if (cmd->duplex == DUPLEX_HALF
1950 && !(f & SUPPORTED_10baseT_Half))
1951 return -EINVAL;
1952 if (cmd->duplex == DUPLEX_FULL
1953 && !(f & SUPPORTED_10baseT_Full))
1954 return -EINVAL;
1955 break;
1956 case SPEED_100:
1957 if (cmd->duplex == DUPLEX_HALF
1958 && !(f & SUPPORTED_100baseT_Half))
1959 return -EINVAL;
1960 if (cmd->duplex == DUPLEX_FULL
1961 && !(f & SUPPORTED_100baseT_Full))
1962 return -EINVAL;
1963 break;
1964 case SPEED_1000:
1965 if (cmd->duplex == DUPLEX_HALF
1966 && !(f & SUPPORTED_1000baseT_Half))
1967 return -EINVAL;
1968 if (cmd->duplex == DUPLEX_FULL
1969 && !(f & SUPPORTED_1000baseT_Full))
1970 return -EINVAL;
1971 break;
1972 default:
1973 return -EINVAL;
1974 }
1975
1976 mutex_lock(&dev->link_lock);
1977 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1978 cmd->duplex);
1979 mutex_unlock(&dev->link_lock);
1980
1981 } else {
1982 if (!(f & SUPPORTED_Autoneg))
1983 return -EINVAL;
1984
1985 mutex_lock(&dev->link_lock);
1986 dev->phy.def->ops->setup_aneg(&dev->phy,
1987 (cmd->advertising & f) |
1988 (dev->phy.advertising &
1989 (ADVERTISED_Pause |
1990 ADVERTISED_Asym_Pause)));
1991 mutex_unlock(&dev->link_lock);
1992 }
1993 emac_force_link_update(dev);
1994
1995 return 0;
1996}
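/*
 * Usage sketch (not part of the driver): the handler above is reached
 * through the SIOCETHTOOL ioctl using the old-style struct ethtool_cmd
 * ABI of this kernel generation. A minimal userspace caller forcing
 * 100/Full with autonegotiation off could look like this; the helper
 * and interface names are invented for illustration.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int emac_force_100_full(const char *ifname)
{
	struct ethtool_cmd ecmd;
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ecmd;

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_GSET;	/* read current settings first */
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret == 0) {
		ecmd.autoneg = AUTONEG_DISABLE;
		ecmd.speed = SPEED_100;
		ecmd.duplex = DUPLEX_FULL;
		ecmd.cmd = ETHTOOL_SSET;	/* lands in emac_ethtool_set_settings() */
		ret = ioctl(fd, SIOCETHTOOL, &ifr);
	}
	close(fd);
	return ret;
}
/* Equivalent from the shell: ethtool -s eth0 autoneg off speed 100 duplex full */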
1997
1998static void emac_ethtool_get_ringparam(struct net_device *ndev,
1999 struct ethtool_ringparam *rp)
2000{
2001 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2002 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2003}
2004
2005static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2006 struct ethtool_pauseparam *pp)
2007{
2008 struct emac_instance *dev = netdev_priv(ndev);
2009
2010 mutex_lock(&dev->link_lock);
2011 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2012 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2013 pp->autoneg = 1;
2014
2015 if (dev->phy.duplex == DUPLEX_FULL) {
2016 if (dev->phy.pause)
2017 pp->rx_pause = pp->tx_pause = 1;
2018 else if (dev->phy.asym_pause)
2019 pp->tx_pause = 1;
2020 }
2021 mutex_unlock(&dev->link_lock);
2022}
2023
2024static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
2025{
2026 struct emac_instance *dev = netdev_priv(ndev);
2027
2028 return dev->tah_dev != NULL;
2029}
2030
2031static int emac_get_regs_len(struct emac_instance *dev)
2032{
2033 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2034 return sizeof(struct emac_ethtool_regs_subhdr) +
2035 EMAC4_ETHTOOL_REGS_SIZE(dev);
2036 else
2037 return sizeof(struct emac_ethtool_regs_subhdr) +
2038 EMAC_ETHTOOL_REGS_SIZE(dev);
2039}
2040
2041static int emac_ethtool_get_regs_len(struct net_device *ndev)
2042{
2043 struct emac_instance *dev = netdev_priv(ndev);
2044 int size;
2045
2046 size = sizeof(struct emac_ethtool_regs_hdr) +
2047 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2048 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2049 size += zmii_get_regs_len(dev->zmii_dev);
2050 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2051 size += rgmii_get_regs_len(dev->rgmii_dev);
2052 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2053 size += tah_get_regs_len(dev->tah_dev);
2054
2055 return size;
2056}
2057
2058static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2059{
2060 struct emac_ethtool_regs_subhdr *hdr = buf;
2061
2062 hdr->index = dev->cell_index;
2063 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2064 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2065 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2066 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
2067 } else {
2068 hdr->version = EMAC_ETHTOOL_REGS_VER;
2069 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2070 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
2071 }
2072}
2073
2074static void emac_ethtool_get_regs(struct net_device *ndev,
2075 struct ethtool_regs *regs, void *buf)
2076{
2077 struct emac_instance *dev = netdev_priv(ndev);
2078 struct emac_ethtool_regs_hdr *hdr = buf;
2079
2080 hdr->components = 0;
2081 buf = hdr + 1;
2082
2083 buf = mal_dump_regs(dev->mal, buf);
2084 buf = emac_dump_regs(dev, buf);
2085 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2086 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2087 buf = zmii_dump_regs(dev->zmii_dev, buf);
2088 }
2089 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2090 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2091 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2092 }
2093 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2094 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2095 buf = tah_dump_regs(dev->tah_dev, buf);
2096 }
2097}
2098
2099static int emac_ethtool_nway_reset(struct net_device *ndev)
2100{
2101 struct emac_instance *dev = netdev_priv(ndev);
2102 int res = 0;
2103
2104 DBG(dev, "nway_reset" NL);
2105
2106 if (dev->phy.address < 0)
2107 return -EOPNOTSUPP;
2108
2109 mutex_lock(&dev->link_lock);
2110 if (!dev->phy.autoneg) {
2111 res = -EINVAL;
2112 goto out;
2113 }
2114
2115 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2116 out:
2117 mutex_unlock(&dev->link_lock);
2118 emac_force_link_update(dev);
2119 return res;
2120}
2121
2122static int emac_ethtool_get_stats_count(struct net_device *ndev)
2123{
2124 return EMAC_ETHTOOL_STATS_COUNT;
2125}
2126
2127static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2128 u8 *buf)
2129{
2130 if (stringset == ETH_SS_STATS)
2131 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2132}
2133
2134static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2135 struct ethtool_stats *estats,
2136 u64 *tmp_stats)
2137{
2138 struct emac_instance *dev = netdev_priv(ndev);
2139
2140 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2141 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2142 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
2143}
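/*
 * The three stats callbacks here cooperate: get_stats_count sizes the
 * buffer, get_strings labels the slots, and get_ethtool_stats dumps
 * dev->stats followed by dev->estats as one flat u64 array. A sketch of
 * the consuming side (function name invented, error handling minimal):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int dump_emac_stats(const char *ifname)
{
	struct ethtool_drvinfo drvinfo;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i;
	int ret = -1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* n_stats comes back from get_drvinfo (EMAC_ETHTOOL_STATS_COUNT here) */
	memset(&drvinfo, 0, sizeof(drvinfo));
	drvinfo.cmd = ETHTOOL_GDRVINFO;
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && drvinfo.n_stats) {
		stats = calloc(1, sizeof(*stats) +
				  drvinfo.n_stats * sizeof(__u64));
		if (stats) {
			stats->cmd = ETHTOOL_GSTATS;
			stats->n_stats = drvinfo.n_stats;
			ifr.ifr_data = (char *)stats;
			if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
				for (i = 0; i < stats->n_stats; i++)
					printf("stat[%u] = %llu\n", i,
					       (unsigned long long)stats->data[i]);
				ret = 0;
			}
			free(stats);
		}
	}
	close(fd);
	return ret;
}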
2144
2145static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2146 struct ethtool_drvinfo *info)
2147{
2148 struct emac_instance *dev = netdev_priv(ndev);
2149
2150 strcpy(info->driver, "ibm_emac");
2151 strcpy(info->version, DRV_VERSION);
2152 info->fw_version[0] = '\0';
2153 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2154 dev->cell_index, dev->ofdev->node->full_name);
2155 info->n_stats = emac_ethtool_get_stats_count(ndev);
2156 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2157}
2158
2159static const struct ethtool_ops emac_ethtool_ops = {
2160 .get_settings = emac_ethtool_get_settings,
2161 .set_settings = emac_ethtool_set_settings,
2162 .get_drvinfo = emac_ethtool_get_drvinfo,
2163
2164 .get_regs_len = emac_ethtool_get_regs_len,
2165 .get_regs = emac_ethtool_get_regs,
2166
2167 .nway_reset = emac_ethtool_nway_reset,
2168
2169 .get_ringparam = emac_ethtool_get_ringparam,
2170 .get_pauseparam = emac_ethtool_get_pauseparam,
2171
2172 .get_rx_csum = emac_ethtool_get_rx_csum,
2173
2174 .get_strings = emac_ethtool_get_strings,
2175 .get_stats_count = emac_ethtool_get_stats_count,
2176 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2177
2178 .get_link = ethtool_op_get_link,
2179 .get_tx_csum = ethtool_op_get_tx_csum,
2180 .get_sg = ethtool_op_get_sg,
2181};
2182
2183static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2184{
2185 struct emac_instance *dev = netdev_priv(ndev);
2186 uint16_t *data = (uint16_t *)&rq->ifr_ifru;
2187
2188 DBG(dev, "ioctl %08x" NL, cmd);
2189
2190 if (dev->phy.address < 0)
2191 return -EOPNOTSUPP;
2192
2193 switch (cmd) {
2194 case SIOCGMIIPHY:
2195 case SIOCDEVPRIVATE:
2196 data[0] = dev->phy.address;
2197 /* Fall through */
2198 case SIOCGMIIREG:
2199 case SIOCDEVPRIVATE + 1:
2200 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
2201 return 0;
2202
2203 case SIOCSMIIREG:
2204 case SIOCDEVPRIVATE + 2:
2205 if (!capable(CAP_NET_ADMIN))
2206 return -EPERM;
2207 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
2208 return 0;
2209 default:
2210 return -EOPNOTSUPP;
2211 }
2212}
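/*
 * emac_ioctl() above overlays its u16 argument array on ifr_ifru, which
 * matches the layout of struct mii_ioctl_data (phy_id, reg_num, val_in,
 * val_out). A minimal sketch of a matching userspace caller reading a
 * PHY register; the function name is invented for illustration.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_reg(const char *ifname, unsigned int reg)
{
	struct ifreq ifr;
	/* same overlay the driver uses: u16 fields over ifr_ifru */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {	/* driver fills phy_id (data[0]) */
		close(fd);
		return -1;
	}
	mii->reg_num = reg;			/* data[1] */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {	/* driver fills val_out (data[3]) */
		close(fd);
		return -1;
	}
	close(fd);
	return mii->val_out;
}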
2213
2214struct emac_depentry {
2215 u32 phandle;
2216 struct device_node *node;
2217 struct of_device *ofdev;
2218 void *drvdata;
2219};
2220
2221#define EMAC_DEP_MAL_IDX 0
2222#define EMAC_DEP_ZMII_IDX 1
2223#define EMAC_DEP_RGMII_IDX 2
2224#define EMAC_DEP_TAH_IDX 3
2225#define EMAC_DEP_MDIO_IDX 4
2226#define EMAC_DEP_PREV_IDX 5
2227#define EMAC_DEP_COUNT 6
2228
2229static int __devinit emac_check_deps(struct emac_instance *dev,
2230 struct emac_depentry *deps)
2231{
2232 int i, there = 0;
2233 struct device_node *np;
2234
2235 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2236 /* no dependency on that item, all right */
2237 if (deps[i].phandle == 0) {
2238 there++;
2239 continue;
2240 }
2241 /* special case for blist as the dependency might go away */
2242 if (i == EMAC_DEP_PREV_IDX) {
2243 np = *(dev->blist - 1);
2244 if (np == NULL) {
2245 deps[i].phandle = 0;
2246 there++;
2247 continue;
2248 }
2249 if (deps[i].node == NULL)
2250 deps[i].node = of_node_get(np);
2251 }
2252 if (deps[i].node == NULL)
2253 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2254 if (deps[i].node == NULL)
2255 continue;
2256 if (deps[i].ofdev == NULL)
2257 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2258 if (deps[i].ofdev == NULL)
2259 continue;
2260 if (deps[i].drvdata == NULL)
2261 deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
2262 if (deps[i].drvdata != NULL)
2263 there++;
2264 }
2265 return (there == EMAC_DEP_COUNT);
2266}
2267
2268static void emac_put_deps(struct emac_instance *dev)
2269{
2270 if (dev->mal_dev)
2271 of_dev_put(dev->mal_dev);
2272 if (dev->zmii_dev)
2273 of_dev_put(dev->zmii_dev);
2274 if (dev->rgmii_dev)
2275 of_dev_put(dev->rgmii_dev);
2276 if (dev->mdio_dev)
2277 of_dev_put(dev->mdio_dev);
2278 if (dev->tah_dev)
2279 of_dev_put(dev->tah_dev);
2280}
2281
2282static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2283 unsigned long action, void *data)
2284{
2285 /* We are only interested in devices getting bound to a driver */
2286 if (action == BUS_NOTIFY_BOUND_DRIVER)
2287 wake_up_all(&emac_probe_wait);
2288 return 0;
2289}
2290
2291static struct notifier_block emac_of_bus_notifier __devinitdata = {
2292 .notifier_call = emac_of_bus_notify
2293};
2294
2295static int __devinit emac_wait_deps(struct emac_instance *dev)
2296{
2297 struct emac_depentry deps[EMAC_DEP_COUNT];
2298 int i, err;
2299
2300 memset(&deps, 0, sizeof(deps));
2301
2302 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2303 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2304 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2305 if (dev->tah_ph)
2306 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2307 if (dev->mdio_ph)
2308 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
2309 if (dev->blist && dev->blist > emac_boot_list)
2310 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2311 bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2312 wait_event_timeout(emac_probe_wait,
2313 emac_check_deps(dev, deps),
2314 EMAC_PROBE_DEP_TIMEOUT);
2315 bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier);
2316 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2317 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2318 if (deps[i].node)
2319 of_node_put(deps[i].node);
2320 if (err && deps[i].ofdev)
2321 of_dev_put(deps[i].ofdev);
2322 }
2323 if (err == 0) {
2324 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2325 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2326 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2327 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2328 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2329 }
2330 if (deps[EMAC_DEP_PREV_IDX].ofdev)
2331 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
2332 return err;
2333}
2334
2335static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
2336 u32 *val, int fatal)
2337{
2338 int len;
2339 const u32 *prop = of_get_property(np, name, &len);
2340 if (prop == NULL || len < sizeof(u32)) {
2341 if (fatal)
2342 printk(KERN_ERR "%s: missing %s property\n",
2343 np->full_name, name);
2344 return -ENODEV;
2345 }
2346 *val = *prop;
2347 return 0;
2348}
2349
2350static int __devinit emac_init_phy(struct emac_instance *dev)
2351{
2352 struct device_node *np = dev->ofdev->node;
2353 struct net_device *ndev = dev->ndev;
2354 u32 phy_map, adv;
2355 int i;
2356
2357 dev->phy.dev = ndev;
2358 dev->phy.mode = dev->phy_mode;
2359
2360 /* PHY-less configuration.
2361 * XXX I probably should move these settings to the dev tree
2362 */
2363 if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
2364 emac_reset(dev);
2365
2366 /* PHY-less configuration.
2367 * XXX I probably should move these settings to the dev tree
2368 */
2369 dev->phy.address = -1;
2370 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2371 dev->phy.pause = 1;
2372
2373 return 0;
2374 }
2375
2376 mutex_lock(&emac_phy_map_lock);
2377 phy_map = dev->phy_map | busy_phy_map;
2378
2379 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2380
2381 dev->phy.mdio_read = emac_mdio_read;
2382 dev->phy.mdio_write = emac_mdio_write;
2383
2384 /* Enable internal clock source */
2385#ifdef CONFIG_PPC_DCR_NATIVE
2386 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2387 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2388#endif
2389 /* PHY clock workaround */
2390 emac_rx_clk_tx(dev);
2391
2392 /* Enable internal clock source on 440GX */
2393#ifdef CONFIG_PPC_DCR_NATIVE
2394 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2395 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2396#endif
2397 /* Configure EMAC with defaults so we can at least use MDIO
2398 * This is needed mostly for 440GX
2399 */
2400 if (emac_phy_gpcs(dev->phy.mode)) {
2401 /* XXX
2402 * Make GPCS PHY address equal to EMAC index.
2403 * We probably should take into account busy_phy_map
2404 * and/or phy_map here.
2405 *
2406 * Note that the busy_phy_map is currently global
2407 * while it should probably be per-ASIC...
2408 */
2409 dev->phy.address = dev->cell_index;
2410 }
2411
2412 emac_configure(dev);
2413
2414 if (dev->phy_address != 0xffffffff)
2415 phy_map = ~(1 << dev->phy_address);
2416
2417 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2418 if (!(phy_map & 1)) {
2419 int r;
2420 busy_phy_map |= 1 << i;
2421
2422 /* Quick check if there is a PHY at the address */
2423 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2424 if (r == 0xffff || r < 0)
2425 continue;
2426 if (!emac_mii_phy_probe(&dev->phy, i))
2427 break;
2428 }
2429
2430 /* Enable external clock source */
2431#ifdef CONFIG_PPC_DCR_NATIVE
2432 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2433 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2434#endif
2435 mutex_unlock(&emac_phy_map_lock);
2436 if (i == 0x20) {
2437 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
2438 return -ENXIO;
2439 }
2440
2441 /* Init PHY */
2442 if (dev->phy.def->ops->init)
2443 dev->phy.def->ops->init(&dev->phy);
2444
2445 /* Disable any PHY features not supported by the platform */
2446 dev->phy.def->features &= ~dev->phy_feat_exc;
2447
2448 /* Setup initial link parameters */
2449 if (dev->phy.features & SUPPORTED_Autoneg) {
2450 adv = dev->phy.features;
2451 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2452 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2453 /* Restart autonegotiation */
2454 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2455 } else {
2456 u32 f = dev->phy.def->features;
2457 int speed = SPEED_10, fd = DUPLEX_HALF;
2458
2459 /* Select highest supported speed/duplex */
2460 if (f & SUPPORTED_1000baseT_Full) {
2461 speed = SPEED_1000;
2462 fd = DUPLEX_FULL;
2463 } else if (f & SUPPORTED_1000baseT_Half)
2464 speed = SPEED_1000;
2465 else if (f & SUPPORTED_100baseT_Full) {
2466 speed = SPEED_100;
2467 fd = DUPLEX_FULL;
2468 } else if (f & SUPPORTED_100baseT_Half)
2469 speed = SPEED_100;
2470 else if (f & SUPPORTED_10baseT_Full)
2471 fd = DUPLEX_FULL;
2472
2473 /* Force link parameters */
2474 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2475 }
2476 return 0;
2477}
2478
2479static int __devinit emac_init_config(struct emac_instance *dev)
2480{
2481 struct device_node *np = dev->ofdev->node;
2482 const void *p;
2483 unsigned int plen;
2484 const char *pm, *phy_modes[] = {
2485 [PHY_MODE_NA] = "",
2486 [PHY_MODE_MII] = "mii",
2487 [PHY_MODE_RMII] = "rmii",
2488 [PHY_MODE_SMII] = "smii",
2489 [PHY_MODE_RGMII] = "rgmii",
2490 [PHY_MODE_TBI] = "tbi",
2491 [PHY_MODE_GMII] = "gmii",
2492 [PHY_MODE_RTBI] = "rtbi",
2493 [PHY_MODE_SGMII] = "sgmii",
2494 };
2495
2496 /* Read config from device-tree */
2497 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2498 return -ENXIO;
2499 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2500 return -ENXIO;
2501 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2502 return -ENXIO;
2503 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2504 return -ENXIO;
2505 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2506 dev->max_mtu = 1500;
2507 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2508 dev->rx_fifo_size = 2048;
2509 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2510 dev->tx_fifo_size = 2048;
2511 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2512 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2513 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2514 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2515 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2516 dev->phy_address = 0xffffffff;
2517 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2518 dev->phy_map = 0xffffffff;
2519 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2520 return -ENXIO;
2521 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2522 dev->tah_ph = 0;
2523 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2524 dev->tah_port = 0;
2525 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2526 dev->mdio_ph = 0;
2527 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2528 dev->zmii_ph = 0;
2529 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2530 dev->zmii_port = 0xffffffff;
2531 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2532 dev->rgmii_ph = 0;
2533 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2534 dev->rgmii_port = 0xffffffff;
2535 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2536 dev->fifo_entry_size = 16;
2537 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2538 dev->mal_burst_size = 256;
2539
2540 /* PHY mode needs some decoding */
2541 dev->phy_mode = PHY_MODE_NA;
2542 pm = of_get_property(np, "phy-mode", &plen);
2543 if (pm != NULL) {
2544 int i;
2545 for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
2546 if (!strcasecmp(pm, phy_modes[i])) {
2547 dev->phy_mode = i;
2548 break;
2549 }
2550 }
2551
2552 /* Backward compat with non-final DT */
2553 if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
2554 u32 nmode = *(const u32 *)pm;
2555 if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
2556 dev->phy_mode = nmode;
2557 }
2558
2559 /* Check EMAC version */
2560 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2561 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2562 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2563 dev->features |= EMAC_FTR_EMAC4;
2564 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2565 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2566 } else {
2567 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2568 of_device_is_compatible(np, "ibm,emac-440gr"))
2569 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2570 if (of_device_is_compatible(np, "ibm,emac-405ez"))
2571 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2572 }
2573
2574 /* Fixup some feature bits based on the device tree */
2575 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2576 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2577 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2578 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2579
2580 /* CAB lacks the appropriate properties */
2581 if (of_device_is_compatible(np, "ibm,emac-axon"))
2582 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2583 EMAC_FTR_STACR_OC_INVERT;
2584
2585 /* Enable TAH/ZMII/RGMII features as found */
2586 if (dev->tah_ph != 0) {
2587#ifdef CONFIG_IBM_NEW_EMAC_TAH
2588 dev->features |= EMAC_FTR_HAS_TAH;
2589#else
2590 printk(KERN_ERR "%s: TAH support not enabled !\n",
2591 np->full_name);
2592 return -ENXIO;
2593#endif
2594 }
2595
2596 if (dev->zmii_ph != 0) {
2597#ifdef CONFIG_IBM_NEW_EMAC_ZMII
2598 dev->features |= EMAC_FTR_HAS_ZMII;
2599#else
2600 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2601 np->full_name);
2602 return -ENXIO;
2603#endif
2604 }
2605
2606 if (dev->rgmii_ph != 0) {
2607#ifdef CONFIG_IBM_NEW_EMAC_RGMII
2608 dev->features |= EMAC_FTR_HAS_RGMII;
2609#else
2610 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2611 np->full_name);
2612 return -ENXIO;
2613#endif
2614 }
2615
2616 /* Read MAC-address */
2617 p = of_get_property(np, "local-mac-address", NULL);
2618 if (p == NULL) {
2619 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2620 np->full_name);
2621 return -ENXIO;
2622 }
2623 memcpy(dev->ndev->dev_addr, p, 6);
2624
2625 /* IAHT and GAHT filter parameterization */
2626 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2627 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2628 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2629 } else {
2630 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2631 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2632 }
2633
2634 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2635 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2636 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2637 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2638 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
2639
2640 return 0;
2641}
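/*
 * Illustrative only: a device-tree node shaped the way emac_init_config()
 * expects, carrying the properties read above. The node name, phandle
 * targets and values are invented placeholders, not taken from any real
 * board file:
 *
 *	EMAC0: ethernet@ef600800 {
 *		device_type = "network";
 *		compatible = "ibm,emac4";
 *		interrupt-parent = <&UIC0>;
 *		interrupts = <0x18 0x4>;
 *		reg = <0xef600800 0x70>;
 *		local-mac-address = [000000000000];	// filled in by the boot loader
 *		mal-device = <&MAL0>;
 *		mal-tx-channel = <0>;
 *		mal-rx-channel = <0>;
 *		cell-index = <0>;
 *		max-frame-size = <1500>;
 *		rx-fifo-size = <4096>;
 *		tx-fifo-size = <2048>;
 *		phy-mode = "rgmii";
 *		phy-map = <0x00000000>;
 *		rgmii-device = <&RGMII0>;
 *		rgmii-channel = <0>;
 *	};
 */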
2642
2643static int __devinit emac_probe(struct of_device *ofdev,
2644 const struct of_device_id *match)
2645{
2646 struct net_device *ndev;
2647 struct emac_instance *dev;
2648 struct device_node *np = ofdev->node;
2649 struct device_node **blist = NULL;
2650 int err, i;
2651
2652 /* Skip unused/unwired EMACs. We leave the check for an unused
2653 * property here for now, but new flat device trees should set a
2654 * status property to "disabled" instead.
2655 */
2656 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2657 return -ENODEV;
2658
2659 /* Find ourselves in the bootlist if we are there */
2660 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2661 if (emac_boot_list[i] == np)
2662 blist = &emac_boot_list[i];
2663
2664 /* Allocate our net_device structure */
2665 err = -ENOMEM;
2666 ndev = alloc_etherdev(sizeof(struct emac_instance));
2667 if (!ndev) {
2668 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2669 np->full_name);
2670 goto err_gone;
2671 }
2672 dev = netdev_priv(ndev);
2673 dev->ndev = ndev;
2674 dev->ofdev = ofdev;
2675 dev->blist = blist;
2676 SET_NETDEV_DEV(ndev, &ofdev->dev);
2677
2678 /* Initialize some embedded data structures */
2679 mutex_init(&dev->mdio_lock);
2680 mutex_init(&dev->link_lock);
2681 spin_lock_init(&dev->lock);
2682 INIT_WORK(&dev->reset_work, emac_reset_work);
2683
2684 /* Init various config data based on device-tree */
2685 err = emac_init_config(dev);
2686 if (err != 0)
2687 goto err_free;
2688
2689 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2690 dev->emac_irq = irq_of_parse_and_map(np, 0);
2691 dev->wol_irq = irq_of_parse_and_map(np, 1);
2692 if (dev->emac_irq == NO_IRQ) {
2693 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2694 goto err_free;
2695 }
2696 ndev->irq = dev->emac_irq;
2697
2698 /* Map EMAC regs */
2699 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2700 printk(KERN_ERR "%s: Can't get registers address\n",
2701 np->full_name);
2702 goto err_irq_unmap;
2703 }
2704 /* TODO: request_mem_region */
2705 dev->emacp = ioremap(dev->rsrc_regs.start,
2706 dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
2707 if (dev->emacp == NULL) {
2708 printk(KERN_ERR "%s: Can't map device registers!\n",
2709 np->full_name);
2710 err = -ENOMEM;
2711 goto err_irq_unmap;
2712 }
2713
2714 /* Wait for dependent devices */
2715 err = emac_wait_deps(dev);
2716 if (err) {
2717 printk(KERN_ERR
2718 "%s: Timeout waiting for dependent devices\n",
2719 np->full_name);
2720 /* display more info about what's missing ? */
2721 goto err_reg_unmap;
2722 }
2723 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2724 if (dev->mdio_dev != NULL)
2725 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2726
2727 /* Register with MAL */
2728 dev->commac.ops = &emac_commac_ops;
2729 dev->commac.dev = dev;
2730 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2731 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2732 err = mal_register_commac(dev->mal, &dev->commac);
2733 if (err) {
2734 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2735 np->full_name, dev->mal_dev->node->full_name);
2736 goto err_rel_deps;
2737 }
2738 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2739 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2740
2741 /* Get pointers to BD rings */
2742 dev->tx_desc =
2743 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2744 dev->rx_desc =
2745 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2746
2747 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2748 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2749
2750 /* Clean rings */
2751 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2752 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2753 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2754 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2755
2756 /* Attach to ZMII, if needed */
2757 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2758 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2759 goto err_unreg_commac;
2760
2761 /* Attach to RGMII, if needed */
2762 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2763 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2764 goto err_detach_zmii;
2765
2766 /* Attach to TAH, if needed */
2767 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2768 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2769 goto err_detach_rgmii;
2770
2771 /* Set some link defaults before we can find out real parameters */
2772 dev->phy.speed = SPEED_100;
2773 dev->phy.duplex = DUPLEX_FULL;
2774 dev->phy.autoneg = AUTONEG_DISABLE;
2775 dev->phy.pause = dev->phy.asym_pause = 0;
2776 dev->stop_timeout = STOP_TIMEOUT_100;
2777 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2778
2779 /* Find PHY if any */
2780 err = emac_init_phy(dev);
2781 if (err != 0)
2782 goto err_detach_tah;
2783
2784 /* Fill in the driver function table */
2785 ndev->open = &emac_open;
2786 if (dev->tah_dev)
2787 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2788 ndev->tx_timeout = &emac_tx_timeout;
2789 ndev->watchdog_timeo = 5 * HZ;
2790 ndev->stop = &emac_close;
2791 ndev->get_stats = &emac_stats;
2792 ndev->set_multicast_list = &emac_set_multicast_list;
2793 ndev->do_ioctl = &emac_ioctl;
2794 if (emac_phy_supports_gige(dev->phy_mode)) {
2795 ndev->hard_start_xmit = &emac_start_xmit_sg;
2796 ndev->change_mtu = &emac_change_mtu;
2797 dev->commac.ops = &emac_commac_sg_ops;
2798 } else {
2799 ndev->hard_start_xmit = &emac_start_xmit;
2800 }
2801 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2802
2803 netif_carrier_off(ndev);
2804 netif_stop_queue(ndev);
2805
2806 err = register_netdev(ndev);
2807 if (err) {
2808 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2809 np->full_name, err);
2810 goto err_detach_tah;
2811 }
2812
2813 /* Set our drvdata last as we don't want them visible until we are
2814 * fully initialized
2815 */
2816 wmb();
2817 dev_set_drvdata(&ofdev->dev, dev);
2818
2819 /* There's a new kid in town! Let's tell everybody */
2820 wake_up_all(&emac_probe_wait);
2821
2822
2823 printk(KERN_INFO
2824 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2825 ndev->name, dev->cell_index, np->full_name,
2826 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2827 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2828
2829 if (dev->phy.address >= 0)
2830 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2831 dev->phy.def->name, dev->phy.address);
2832
2833 emac_dbg_register(dev);
2834
2835 /* Life is good */
2836 return 0;
2837
2838 /* I have a bad feeling about this ... */
2839
2840 err_detach_tah:
2841 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2842 tah_detach(dev->tah_dev, dev->tah_port);
2843 err_detach_rgmii:
2844 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2845 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2846 err_detach_zmii:
2847 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2848 zmii_detach(dev->zmii_dev, dev->zmii_port);
2849 err_unreg_commac:
2850 mal_unregister_commac(dev->mal, &dev->commac);
2851 err_rel_deps:
2852 emac_put_deps(dev);
2853 err_reg_unmap:
2854 iounmap(dev->emacp);
2855 err_irq_unmap:
2856 if (dev->wol_irq != NO_IRQ)
2857 irq_dispose_mapping(dev->wol_irq);
2858 if (dev->emac_irq != NO_IRQ)
2859 irq_dispose_mapping(dev->emac_irq);
2860 err_free:
2861 free_netdev(ndev);
2862 err_gone:
2863 /* if we were on the bootlist, remove us as we won't show up and
2864 * wake up all waiters to notify them in case they were waiting
2865 * on us
2866 */
2867 if (blist) {
2868 *blist = NULL;
2869 wake_up_all(&emac_probe_wait);
2870 }
2871 return err;
2872}
2873
2874static int __devexit emac_remove(struct of_device *ofdev)
2875{
2876 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2877
2878 DBG(dev, "remove" NL);
2879
2880 dev_set_drvdata(&ofdev->dev, NULL);
2881
2882 unregister_netdev(dev->ndev);
2883
2884 flush_scheduled_work();
2885
2886 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2887 tah_detach(dev->tah_dev, dev->tah_port);
2888 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2889 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2890 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2891 zmii_detach(dev->zmii_dev, dev->zmii_port);
2892
2893 mal_unregister_commac(dev->mal, &dev->commac);
2894 emac_put_deps(dev);
2895
2896 emac_dbg_unregister(dev);
2897 iounmap(dev->emacp);
2898
2899 if (dev->wol_irq != NO_IRQ)
2900 irq_dispose_mapping(dev->wol_irq);
2901 if (dev->emac_irq != NO_IRQ)
2902 irq_dispose_mapping(dev->emac_irq);
2903
2904 free_netdev(dev->ndev);
2905
2906 return 0;
2907}
2908
2909/* XXX Features in here should be replaced by properties... */
2910static struct of_device_id emac_match[] =
2911{
2912 {
2913 .type = "network",
2914 .compatible = "ibm,emac",
2915 },
2916 {
2917 .type = "network",
2918 .compatible = "ibm,emac4",
2919 },
2920 {
2921 .type = "network",
2922 .compatible = "ibm,emac4sync",
2923 },
2924 {},
2925};
2926
2927static struct of_platform_driver emac_driver = {
2928 .name = "emac",
2929 .match_table = emac_match,
2930
2931 .probe = emac_probe,
2932 .remove = emac_remove,
2933};
2934
2935static void __init emac_make_bootlist(void)
2936{
2937 struct device_node *np = NULL;
2938 int j, max, i = 0, k;
2939 int cell_indices[EMAC_BOOT_LIST_SIZE];
2940
2941 /* Collect EMACs */
2942 while((np = of_find_all_nodes(np)) != NULL) {
2943 const u32 *idx;
2944
2945 if (of_match_node(emac_match, np) == NULL)
2946 continue;
2947 if (of_get_property(np, "unused", NULL))
2948 continue;
2949 idx = of_get_property(np, "cell-index", NULL);
2950 if (idx == NULL)
2951 continue;
2952 cell_indices[i] = *idx;
2953 emac_boot_list[i++] = of_node_get(np);
2954 if (i >= EMAC_BOOT_LIST_SIZE) {
2955 of_node_put(np);
2956 break;
2957 }
2958 }
2959 max = i;
2960
2961 /* Bubble sort them (doh, what a creative algorithm :-) */
2962 for (i = 0; max > 1 && (i < (max - 1)); i++)
2963 for (j = i; j < max; j++) {
2964 if (cell_indices[i] > cell_indices[j]) {
2965 np = emac_boot_list[i];
2966 emac_boot_list[i] = emac_boot_list[j];
2967 emac_boot_list[j] = np;
2968 k = cell_indices[i];
2969 cell_indices[i] = cell_indices[j];
2970 cell_indices[j] = k;
2971 }
2972 }
2973}
2974
2975static int __init emac_init(void)
2976{
2977 int rc;
2978
2979 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2980
2981 /* Init debug stuff */
2982 emac_init_debug();
2983
2984 /* Build EMAC boot list */
2985 emac_make_bootlist();
2986
2987 /* Init submodules */
2988 rc = mal_init();
2989 if (rc)
2990 goto err;
2991 rc = zmii_init();
2992 if (rc)
2993 goto err_mal;
2994 rc = rgmii_init();
2995 if (rc)
2996 goto err_zmii;
2997 rc = tah_init();
2998 if (rc)
2999 goto err_rgmii;
3000 rc = of_register_platform_driver(&emac_driver);
3001 if (rc)
3002 goto err_tah;
3003
3004 return 0;
3005
3006 err_tah:
3007 tah_exit();
3008 err_rgmii:
3009 rgmii_exit();
3010 err_zmii:
3011 zmii_exit();
3012 err_mal:
3013 mal_exit();
3014 err:
3015 return rc;
3016}
3017
3018static void __exit emac_exit(void)
3019{
3020 int i;
3021
3022 of_unregister_platform_driver(&emac_driver);
3023
3024 tah_exit();
3025 rgmii_exit();
3026 zmii_exit();
3027 mal_exit();
3028 emac_fini_debug();
3029
3030 /* Destroy EMAC boot list */
3031 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3032 if (emac_boot_list[i])
3033 of_node_put(emac_boot_list[i]);
3034}
3035
3036module_init(emac_init);
3037module_exit(emac_exit);