[SK_BUFF]: Convert skb->tail to sk_buff_data_t
[linux-block.git] / drivers / net / ibm_emac / ibm_emac_core.c
CommitLineData
1da177e4 1/*
37448f7d 2 * drivers/net/ibm_emac/ibm_emac_core.c
1da177e4 3 *
37448f7d 4 * Driver for PowerPC 4xx on-chip ethernet controller.
1da177e4 5 *
37448f7d
ES
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
1da177e4 8 *
37448f7d
ES
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
1da177e4
LT
12 * Armin Kuster <akuster@mvista.com>
13 * Johnnie Peters <jpeters@mvista.com>
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
37448f7d 19 *
1da177e4 20 */
37448f7d 21
1da177e4
LT
22#include <linux/module.h>
23#include <linux/kernel.h>
1da177e4 24#include <linux/string.h>
1da177e4 25#include <linux/errno.h>
1da177e4
LT
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/init.h>
29#include <linux/types.h>
37448f7d
ES
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/crc32.h>
1da177e4
LT
35#include <linux/ethtool.h>
36#include <linux/mii.h>
37#include <linux/bitops.h>
38
39#include <asm/processor.h>
40#include <asm/io.h>
41#include <asm/dma.h>
1da177e4
LT
42#include <asm/uaccess.h>
43#include <asm/ocp.h>
44
1da177e4 45#include "ibm_emac_core.h"
37448f7d 46#include "ibm_emac_debug.h"
1da177e4
LT
47
48/*
37448f7d
ES
49 * Lack of dma_unmap_???? calls is intentional.
50 *
51 * API-correct usage requires additional support state information to be
52 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
53 * EMAC design (e.g. TX buffer passed from network stack can be split into
54 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
55 * maintaining such information will add additional overhead.
56 * Current DMA API implementation for 4xx processors only ensures cache coherency
57 * and dma_unmap_???? routines are empty and are likely to stay this way.
58 * I decided to omit dma_unmap_??? calls because I don't want to add additional
59 * complexity just for the sake of following some abstract API, when it doesn't
60 * add any real benefit to the driver. I understand that this decision maybe
61 * controversial, but I really tried to make code API-correct and efficient
62 * at the same time and didn't come up with code I liked :(. --ebs
1da177e4 63 */
1da177e4 64
37448f7d 65#define DRV_NAME "emac"
8169bd91 66#define DRV_VERSION "3.54"
37448f7d
ES
67#define DRV_DESC "PPC 4xx OCP EMAC driver"
68
1da177e4 69MODULE_DESCRIPTION(DRV_DESC);
37448f7d
ES
70MODULE_AUTHOR
71 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
1da177e4
LT
72MODULE_LICENSE("GPL");
73
37448f7d
ES
74/* minimum number of free TX descriptors required to wake up TX process */
75#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
1da177e4 76
37448f7d
ES
77/* If packet size is less than this number, we allocate small skb and copy packet
78 * contents into it instead of just sending original big skb up
79 */
80#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
1da177e4 81
37448f7d
ES
82/* Since multiple EMACs share MDIO lines in various ways, we need
83 * to avoid re-using the same PHY ID in cases where the arch didn't
84 * setup precise phy_map entries
85 */
86static u32 busy_phy_map;
1da177e4 87
1b195916
ES
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;

	local_irq_save(flags);

#if defined(CONFIG_405EP)
	/* CPC0_EPCTL: one clock-select bit per EMAC */
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;

	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
1da177e4 126
37448f7d
ES
127#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
128/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
129 * unfortunately this is less flexible than 440EP case, because it's a global
130 * setting for all EMACs, therefore we do this clock trick only during probe.
1da177e4 131 */
37448f7d
ES
132#define EMAC_CLK_INTERNAL SDR_WRITE(DCRN_SDR_MFR, \
133 SDR_READ(DCRN_SDR_MFR) | 0x08000000)
134#define EMAC_CLK_EXTERNAL SDR_WRITE(DCRN_SDR_MFR, \
135 SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
136#else
137#define EMAC_CLK_INTERNAL ((void)0)
138#define EMAC_CLK_EXTERNAL ((void)0)
139#endif
1da177e4 140
37448f7d
ES
141/* I don't want to litter system log with timeout errors
142 * when we have brain-damaged PHY.
143 */
144static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
145 const char *error)
146{
147#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
148 DBG("%d: %s" NL, dev->def->index, error);
149#else
150 if (net_ratelimit())
151 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
152#endif
153}
1da177e4 154
37448f7d
ES
/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
166
37448f7d
ES
167/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
168static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
169 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
170 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
171 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
172 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
173 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
174 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
175 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
176 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
177 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
178 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
179 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
180 "tx_bd_excessive_collisions", "tx_bd_late_collision",
181 "tx_bd_multple_collisions", "tx_bd_single_collision",
182 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
183 "tx_errors"
1da177e4
LT
184};
185
7d12e780 186static irqreturn_t emac_irq(int irq, void *dev_instance);
37448f7d
ES
187static void emac_clean_tx_ring(struct ocp_enet_private *dev);
188
189static inline int emac_phy_supports_gige(int phy_mode)
1da177e4 190{
37448f7d
ES
191 return phy_mode == PHY_MODE_GMII ||
192 phy_mode == PHY_MODE_RGMII ||
193 phy_mode == PHY_MODE_TBI ||
194 phy_mode == PHY_MODE_RTBI;
195}
1da177e4 196
37448f7d 197static inline int emac_phy_gpcs(int phy_mode)
1da177e4 198{
37448f7d
ES
199 return phy_mode == PHY_MODE_TBI ||
200 phy_mode == PHY_MODE_RTBI;
201}
1da177e4 202
37448f7d
ES
203static inline void emac_tx_enable(struct ocp_enet_private *dev)
204{
b43de2d8 205 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
206 unsigned long flags;
207 u32 r;
1da177e4 208
37448f7d 209 local_irq_save(flags);
1da177e4 210
37448f7d 211 DBG("%d: tx_enable" NL, dev->def->index);
1da177e4 212
37448f7d
ES
213 r = in_be32(&p->mr0);
214 if (!(r & EMAC_MR0_TXE))
215 out_be32(&p->mr0, r | EMAC_MR0_TXE);
216 local_irq_restore(flags);
217}
1da177e4 218
37448f7d
ES
219static void emac_tx_disable(struct ocp_enet_private *dev)
220{
b43de2d8 221 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
222 unsigned long flags;
223 u32 r;
1da177e4 224
37448f7d 225 local_irq_save(flags);
1da177e4 226
37448f7d
ES
227 DBG("%d: tx_disable" NL, dev->def->index);
228
229 r = in_be32(&p->mr0);
230 if (r & EMAC_MR0_TXE) {
8169bd91 231 int n = dev->stop_timeout;
37448f7d 232 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
8169bd91
ES
233 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
234 udelay(1);
37448f7d 235 --n;
8169bd91 236 }
37448f7d
ES
237 if (unlikely(!n))
238 emac_report_timeout_error(dev, "TX disable timeout");
239 }
240 local_irq_restore(flags);
241}
1da177e4 242
37448f7d
ES
243static void emac_rx_enable(struct ocp_enet_private *dev)
244{
b43de2d8 245 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
246 unsigned long flags;
247 u32 r;
1da177e4 248
37448f7d
ES
249 local_irq_save(flags);
250 if (unlikely(dev->commac.rx_stopped))
251 goto out;
1da177e4 252
37448f7d
ES
253 DBG("%d: rx_enable" NL, dev->def->index);
254
255 r = in_be32(&p->mr0);
256 if (!(r & EMAC_MR0_RXE)) {
257 if (unlikely(!(r & EMAC_MR0_RXI))) {
258 /* Wait if previous async disable is still in progress */
8169bd91
ES
259 int n = dev->stop_timeout;
260 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
261 udelay(1);
37448f7d 262 --n;
8169bd91 263 }
37448f7d
ES
264 if (unlikely(!n))
265 emac_report_timeout_error(dev,
266 "RX disable timeout");
267 }
268 out_be32(&p->mr0, r | EMAC_MR0_RXE);
269 }
270 out:
271 local_irq_restore(flags);
1da177e4
LT
272}
273
37448f7d 274static void emac_rx_disable(struct ocp_enet_private *dev)
1da177e4 275{
b43de2d8 276 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
277 unsigned long flags;
278 u32 r;
1da177e4 279
37448f7d 280 local_irq_save(flags);
1da177e4 281
37448f7d 282 DBG("%d: rx_disable" NL, dev->def->index);
1da177e4 283
37448f7d
ES
284 r = in_be32(&p->mr0);
285 if (r & EMAC_MR0_RXE) {
8169bd91 286 int n = dev->stop_timeout;
37448f7d 287 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
8169bd91
ES
288 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
289 udelay(1);
37448f7d 290 --n;
8169bd91 291 }
37448f7d
ES
292 if (unlikely(!n))
293 emac_report_timeout_error(dev, "RX disable timeout");
294 }
295 local_irq_restore(flags);
1da177e4
LT
296}
297
37448f7d 298static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
1da177e4 299{
b43de2d8 300 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
301 unsigned long flags;
302 u32 r;
1da177e4 303
37448f7d
ES
304 local_irq_save(flags);
305
306 DBG("%d: rx_disable_async" NL, dev->def->index);
307
308 r = in_be32(&p->mr0);
309 if (r & EMAC_MR0_RXE)
310 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
311 local_irq_restore(flags);
1da177e4
LT
312}
313
37448f7d 314static int emac_reset(struct ocp_enet_private *dev)
1da177e4 315{
b43de2d8 316 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
317 unsigned long flags;
318 int n = 20;
1da177e4 319
37448f7d 320 DBG("%d: reset" NL, dev->def->index);
1da177e4 321
37448f7d 322 local_irq_save(flags);
1da177e4 323
37448f7d
ES
324 if (!dev->reset_failed) {
325 /* 40x erratum suggests stopping RX channel before reset,
326 * we stop TX as well
327 */
328 emac_rx_disable(dev);
329 emac_tx_disable(dev);
1da177e4
LT
330 }
331
37448f7d
ES
332 out_be32(&p->mr0, EMAC_MR0_SRST);
333 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
334 --n;
335 local_irq_restore(flags);
336
337 if (n) {
338 dev->reset_failed = 0;
339 return 0;
1da177e4 340 } else {
37448f7d
ES
341 emac_report_timeout_error(dev, "reset timeout");
342 dev->reset_failed = 1;
343 return -ETIMEDOUT;
1da177e4 344 }
37448f7d 345}
1da177e4 346
37448f7d
ES
347static void emac_hash_mc(struct ocp_enet_private *dev)
348{
b43de2d8 349 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
350 u16 gaht[4] = { 0 };
351 struct dev_mc_list *dmi;
1da177e4 352
37448f7d 353 DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);
1da177e4 354
37448f7d
ES
355 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
356 int bit;
357 DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
358 dev->def->index,
359 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
360 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
1da177e4 361
37448f7d
ES
362 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
363 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
364 }
365 out_be32(&p->gaht1, gaht[0]);
366 out_be32(&p->gaht2, gaht[1]);
367 out_be32(&p->gaht3, gaht[2]);
368 out_be32(&p->gaht4, gaht[3]);
1da177e4
LT
369}
370
37448f7d 371static inline u32 emac_iff2rmr(struct net_device *ndev)
1da177e4 372{
37448f7d
ES
373 u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
374 EMAC_RMR_BASE;
375
376 if (ndev->flags & IFF_PROMISC)
377 r |= EMAC_RMR_PME;
378 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
379 r |= EMAC_RMR_PMME;
380 else if (ndev->mc_count > 0)
381 r |= EMAC_RMR_MAE;
1da177e4 382
37448f7d 383 return r;
1da177e4
LT
384}
385
37448f7d 386static inline int emac_opb_mhz(void)
1da177e4 387{
37448f7d 388 return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
1da177e4
LT
389}
390
37448f7d
ES
391/* BHs disabled */
392static int emac_configure(struct ocp_enet_private *dev)
1da177e4 393{
b43de2d8 394 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
395 struct net_device *ndev = dev->ndev;
396 int gige;
397 u32 r;
1da177e4 398
37448f7d 399 DBG("%d: configure" NL, dev->def->index);
1da177e4 400
37448f7d
ES
401 if (emac_reset(dev) < 0)
402 return -ETIMEDOUT;
1da177e4 403
37448f7d 404 tah_reset(dev->tah_dev);
1da177e4 405
37448f7d
ES
406 /* Mode register */
407 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
408 if (dev->phy.duplex == DUPLEX_FULL)
38843888 409 r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
8169bd91 410 dev->stop_timeout = STOP_TIMEOUT_10;
37448f7d
ES
411 switch (dev->phy.speed) {
412 case SPEED_1000:
413 if (emac_phy_gpcs(dev->phy.mode)) {
414 r |= EMAC_MR1_MF_1000GPCS |
415 EMAC_MR1_MF_IPPA(dev->phy.address);
1da177e4 416
37448f7d
ES
417 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
418 * identify this GPCS PHY later.
419 */
420 out_be32(&p->ipcr, 0xdeadbeef);
421 } else
422 r |= EMAC_MR1_MF_1000;
423 r |= EMAC_MR1_RFS_16K;
424 gige = 1;
8169bd91
ES
425
426 if (dev->ndev->mtu > ETH_DATA_LEN) {
37448f7d 427 r |= EMAC_MR1_JPSM;
8169bd91
ES
428 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
429 } else
430 dev->stop_timeout = STOP_TIMEOUT_1000;
37448f7d
ES
431 break;
432 case SPEED_100:
433 r |= EMAC_MR1_MF_100;
8169bd91 434 dev->stop_timeout = STOP_TIMEOUT_100;
37448f7d
ES
435 /* Fall through */
436 default:
437 r |= EMAC_MR1_RFS_4K;
438 gige = 0;
439 break;
1da177e4
LT
440 }
441
37448f7d
ES
442 if (dev->rgmii_dev)
443 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
444 dev->phy.speed);
445 else
446 zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);
1da177e4 447
37448f7d
ES
448#if !defined(CONFIG_40x)
449 /* on 40x erratum forces us to NOT use integrated flow control,
450 * let's hope it works on 44x ;)
451 */
452 if (dev->phy.duplex == DUPLEX_FULL) {
453 if (dev->phy.pause)
454 r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
455 else if (dev->phy.asym_pause)
456 r |= EMAC_MR1_APP;
1da177e4 457 }
37448f7d
ES
458#endif
459 out_be32(&p->mr1, r);
460
461 /* Set individual MAC address */
462 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
463 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
464 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
465 ndev->dev_addr[5]);
466
467 /* VLAN Tag Protocol ID */
468 out_be32(&p->vtpid, 0x8100);
469
470 /* Receive mode register */
471 r = emac_iff2rmr(ndev);
472 if (r & EMAC_RMR_MAE)
473 emac_hash_mc(dev);
474 out_be32(&p->rmr, r);
475
476 /* FIFOs thresholds */
477 r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
478 EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
479 out_be32(&p->tmr1, r);
480 out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
481
482 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
483 there should be still enough space in FIFO to allow the our link
484 partner time to process this frame and also time to send PAUSE
485 frame itself.
486
487 Here is the worst case scenario for the RX FIFO "headroom"
488 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
489
490 1) One maximum-length frame on TX 1522 bytes
491 2) One PAUSE frame time 64 bytes
492 3) PAUSE frame decode time allowance 64 bytes
493 4) One maximum-length frame on RX 1522 bytes
494 5) Round-trip propagation delay of the link (100Mb) 15 bytes
495 ----------
496 3187 bytes
497
498 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
499 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
500 */
501 r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
502 EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
503 out_be32(&p->rwmr, r);
504
505 /* Set PAUSE timer to the maximum */
506 out_be32(&p->ptr, 0xffff);
507
508 /* IRQ sources */
509 out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
510 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
511 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
512 EMAC_ISR_IRE | EMAC_ISR_TE);
513
514 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
515 if (emac_phy_gpcs(dev->phy.mode))
516 mii_reset_phy(&dev->phy);
517
518 return 0;
519}
1da177e4 520
37448f7d
ES
521/* BHs disabled */
522static void emac_reinitialize(struct ocp_enet_private *dev)
523{
524 DBG("%d: reinitialize" NL, dev->def->index);
1da177e4 525
37448f7d
ES
526 if (!emac_configure(dev)) {
527 emac_tx_enable(dev);
528 emac_rx_enable(dev);
529 }
530}
1da177e4 531
37448f7d
ES
532/* BHs disabled */
533static void emac_full_tx_reset(struct net_device *ndev)
534{
535 struct ocp_enet_private *dev = ndev->priv;
536 struct ocp_func_emac_data *emacdata = dev->def->additions;
1da177e4 537
37448f7d 538 DBG("%d: full_tx_reset" NL, dev->def->index);
1da177e4 539
37448f7d
ES
540 emac_tx_disable(dev);
541 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
542 emac_clean_tx_ring(dev);
543 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
544
545 emac_configure(dev);
1da177e4 546
37448f7d
ES
547 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
548 emac_tx_enable(dev);
549 emac_rx_enable(dev);
1da177e4 550
37448f7d 551 netif_wake_queue(ndev);
1da177e4
LT
552}
553
37448f7d 554static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
1da177e4 555{
b43de2d8 556 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
557 u32 r;
558 int n;
1da177e4 559
37448f7d 560 DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);
1da177e4 561
37448f7d
ES
562 /* Enable proper MDIO port */
563 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
1da177e4 564
37448f7d
ES
565 /* Wait for management interface to become idle */
566 n = 10;
7ad8a89c 567 while (!emac_phy_done(in_be32(&p->stacr))) {
37448f7d
ES
568 udelay(1);
569 if (!--n)
570 goto to;
1da177e4
LT
571 }
572
37448f7d
ES
573 /* Issue read command */
574 out_be32(&p->stacr,
575 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
576 (reg & EMAC_STACR_PRA_MASK)
7ad8a89c
ES
577 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
578 | EMAC_STACR_START);
37448f7d
ES
579
580 /* Wait for read to complete */
581 n = 100;
7ad8a89c 582 while (!emac_phy_done(r = in_be32(&p->stacr))) {
1da177e4 583 udelay(1);
37448f7d
ES
584 if (!--n)
585 goto to;
586 }
1da177e4 587
37448f7d
ES
588 if (unlikely(r & EMAC_STACR_PHYE)) {
589 DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
590 id, reg);
591 return -EREMOTEIO;
1da177e4
LT
592 }
593
37448f7d
ES
594 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
595 DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
596 return r;
597 to:
598 DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
599 return -ETIMEDOUT;
600}
601
602static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
603 u16 val)
604{
b43de2d8 605 struct emac_regs __iomem *p = dev->emacp;
37448f7d 606 int n;
1da177e4 607
37448f7d
ES
608 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
609 val);
1da177e4 610
37448f7d
ES
611 /* Enable proper MDIO port */
612 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
1da177e4 613
37448f7d
ES
614 /* Wait for management interface to be idle */
615 n = 10;
7ad8a89c 616 while (!emac_phy_done(in_be32(&p->stacr))) {
1da177e4 617 udelay(1);
37448f7d
ES
618 if (!--n)
619 goto to;
620 }
1da177e4 621
37448f7d
ES
622 /* Issue write command */
623 out_be32(&p->stacr,
624 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
625 (reg & EMAC_STACR_PRA_MASK) |
626 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
7ad8a89c 627 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);
1da177e4 628
37448f7d
ES
629 /* Wait for write to complete */
630 n = 100;
7ad8a89c 631 while (!emac_phy_done(in_be32(&p->stacr))) {
37448f7d
ES
632 udelay(1);
633 if (!--n)
634 goto to;
1da177e4 635 }
37448f7d
ES
636 return;
637 to:
638 DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
1da177e4
LT
639}
640
37448f7d 641static int emac_mdio_read(struct net_device *ndev, int id, int reg)
1da177e4 642{
37448f7d
ES
643 struct ocp_enet_private *dev = ndev->priv;
644 int res;
645
646 local_bh_disable();
647 res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
648 (u8) reg);
649 local_bh_enable();
650 return res;
651}
1da177e4 652
37448f7d
ES
653static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
654{
655 struct ocp_enet_private *dev = ndev->priv;
1da177e4 656
37448f7d
ES
657 local_bh_disable();
658 __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
659 (u8) reg, (u16) val);
660 local_bh_enable();
661}
1da177e4 662
37448f7d
ES
663/* BHs disabled */
664static void emac_set_multicast_list(struct net_device *ndev)
665{
666 struct ocp_enet_private *dev = ndev->priv;
b43de2d8 667 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
668 u32 rmr = emac_iff2rmr(ndev);
669
670 DBG("%d: multicast %08x" NL, dev->def->index, rmr);
671 BUG_ON(!netif_running(dev->ndev));
672
673 /* I decided to relax register access rules here to avoid
674 * full EMAC reset.
675 *
676 * There is a real problem with EMAC4 core if we use MWSW_001 bit
677 * in MR1 register and do a full EMAC reset.
678 * One TX BD status update is delayed and, after EMAC reset, it
679 * never happens, resulting in TX hung (it'll be recovered by TX
680 * timeout handler eventually, but this is just gross).
681 * So we either have to do full TX reset or try to cheat here :)
682 *
683 * The only required change is to RX mode register, so I *think* all
684 * we need is just to stop RX channel. This seems to work on all
685 * tested SoCs. --ebs
686 */
687 emac_rx_disable(dev);
688 if (rmr & EMAC_RMR_MAE)
689 emac_hash_mc(dev);
690 out_be32(&p->rmr, rmr);
691 emac_rx_enable(dev);
692}
1da177e4 693
37448f7d
ES
694/* BHs disabled */
695static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
696{
697 struct ocp_func_emac_data *emacdata = dev->def->additions;
698 int rx_sync_size = emac_rx_sync_size(new_mtu);
699 int rx_skb_size = emac_rx_skb_size(new_mtu);
700 int i, ret = 0;
701
702 emac_rx_disable(dev);
703 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
704
705 if (dev->rx_sg_skb) {
706 ++dev->estats.rx_dropped_resize;
707 dev_kfree_skb(dev->rx_sg_skb);
708 dev->rx_sg_skb = NULL;
1da177e4 709 }
1da177e4 710
37448f7d
ES
711 /* Make a first pass over RX ring and mark BDs ready, dropping
712 * non-processed packets on the way. We need this as a separate pass
713 * to simplify error recovery in the case of allocation failure later.
714 */
715 for (i = 0; i < NUM_RX_BUFF; ++i) {
716 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
717 ++dev->estats.rx_dropped_resize;
1da177e4 718
37448f7d
ES
719 dev->rx_desc[i].data_len = 0;
720 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
721 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
722 }
1da177e4 723
37448f7d
ES
724 /* Reallocate RX ring only if bigger skb buffers are required */
725 if (rx_skb_size <= dev->rx_skb_size)
726 goto skip;
1da177e4 727
37448f7d
ES
728 /* Second pass, allocate new skbs */
729 for (i = 0; i < NUM_RX_BUFF; ++i) {
730 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
731 if (!skb) {
732 ret = -ENOMEM;
733 goto oom;
1da177e4
LT
734 }
735
37448f7d
ES
736 BUG_ON(!dev->rx_skb[i]);
737 dev_kfree_skb(dev->rx_skb[i]);
1da177e4 738
37448f7d
ES
739 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
740 dev->rx_desc[i].data_ptr =
741 dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
742 DMA_FROM_DEVICE) + 2;
743 dev->rx_skb[i] = skb;
744 }
745 skip:
746 /* Check if we need to change "Jumbo" bit in MR1 */
747 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
748 /* This is to prevent starting RX channel in emac_rx_enable() */
749 dev->commac.rx_stopped = 1;
750
751 dev->ndev->mtu = new_mtu;
752 emac_full_tx_reset(dev->ndev);
753 }
1da177e4 754
37448f7d
ES
755 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
756 oom:
757 /* Restart RX */
758 dev->commac.rx_stopped = dev->rx_slot = 0;
759 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
760 emac_rx_enable(dev);
1da177e4 761
37448f7d 762 return ret;
1da177e4
LT
763}
764
37448f7d
ES
765/* Process ctx, rtnl_lock semaphore */
766static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1da177e4 767{
37448f7d
ES
768 struct ocp_enet_private *dev = ndev->priv;
769 int ret = 0;
1da177e4 770
37448f7d
ES
771 if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
772 return -EINVAL;
1da177e4 773
37448f7d 774 DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
1da177e4 775
37448f7d
ES
776 local_bh_disable();
777 if (netif_running(ndev)) {
778 /* Check if we really need to reinitalize RX ring */
779 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
780 ret = emac_resize_rx_ring(dev, new_mtu);
781 }
1da177e4 782
37448f7d
ES
783 if (!ret) {
784 ndev->mtu = new_mtu;
785 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
786 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
787 }
788 local_bh_enable();
1da177e4 789
37448f7d
ES
790 return ret;
791}
1da177e4 792
37448f7d
ES
793static void emac_clean_tx_ring(struct ocp_enet_private *dev)
794{
795 int i;
796 for (i = 0; i < NUM_TX_BUFF; ++i) {
797 if (dev->tx_skb[i]) {
798 dev_kfree_skb(dev->tx_skb[i]);
799 dev->tx_skb[i] = NULL;
800 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
801 ++dev->estats.tx_dropped;
1da177e4 802 }
37448f7d
ES
803 dev->tx_desc[i].ctrl = 0;
804 dev->tx_desc[i].data_ptr = 0;
805 }
1da177e4
LT
806}
807
37448f7d 808static void emac_clean_rx_ring(struct ocp_enet_private *dev)
1da177e4 809{
37448f7d
ES
810 int i;
811 for (i = 0; i < NUM_RX_BUFF; ++i)
812 if (dev->rx_skb[i]) {
813 dev->rx_desc[i].ctrl = 0;
814 dev_kfree_skb(dev->rx_skb[i]);
815 dev->rx_skb[i] = NULL;
816 dev->rx_desc[i].data_ptr = 0;
817 }
1da177e4 818
37448f7d
ES
819 if (dev->rx_sg_skb) {
820 dev_kfree_skb(dev->rx_sg_skb);
821 dev->rx_sg_skb = NULL;
822 }
1da177e4
LT
823}
824
37448f7d 825static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
b43de2d8 826 gfp_t flags)
1da177e4 827{
37448f7d
ES
828 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
829 if (unlikely(!skb))
830 return -ENOMEM;
1da177e4 831
37448f7d
ES
832 dev->rx_skb[slot] = skb;
833 dev->rx_desc[slot].data_len = 0;
1da177e4 834
37448f7d
ES
835 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
836 dev->rx_desc[slot].data_ptr =
837 dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
838 DMA_FROM_DEVICE) + 2;
839 barrier();
840 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
841 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1da177e4 842
37448f7d 843 return 0;
1da177e4
LT
844}
845
37448f7d 846static void emac_print_link_status(struct ocp_enet_private *dev)
1da177e4 847{
37448f7d
ES
848 if (netif_carrier_ok(dev->ndev))
849 printk(KERN_INFO "%s: link is up, %d %s%s\n",
850 dev->ndev->name, dev->phy.speed,
851 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
852 dev->phy.pause ? ", pause enabled" :
853 dev->phy.asym_pause ? ", assymetric pause enabled" : "");
854 else
855 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
856}
1da177e4 857
37448f7d
ES
858/* Process ctx, rtnl_lock semaphore */
859static int emac_open(struct net_device *ndev)
860{
861 struct ocp_enet_private *dev = ndev->priv;
862 struct ocp_func_emac_data *emacdata = dev->def->additions;
863 int err, i;
864
865 DBG("%d: open" NL, dev->def->index);
866
867 /* Setup error IRQ handler */
868 err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
869 if (err) {
870 printk(KERN_ERR "%s: failed to request IRQ %d\n",
871 ndev->name, dev->def->irq);
872 return err;
1da177e4
LT
873 }
874
37448f7d
ES
875 /* Allocate RX ring */
876 for (i = 0; i < NUM_RX_BUFF; ++i)
877 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
878 printk(KERN_ERR "%s: failed to allocate RX ring\n",
879 ndev->name);
880 goto oom;
881 }
1da177e4 882
37448f7d
ES
883 local_bh_disable();
884 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
885 dev->commac.rx_stopped = 0;
886 dev->rx_sg_skb = NULL;
887
888 if (dev->phy.address >= 0) {
889 int link_poll_interval;
890 if (dev->phy.def->ops->poll_link(&dev->phy)) {
891 dev->phy.def->ops->read_link(&dev->phy);
892 EMAC_RX_CLK_DEFAULT(dev->def->index);
893 netif_carrier_on(dev->ndev);
894 link_poll_interval = PHY_POLL_LINK_ON;
895 } else {
896 EMAC_RX_CLK_TX(dev->def->index);
897 netif_carrier_off(dev->ndev);
898 link_poll_interval = PHY_POLL_LINK_OFF;
899 }
900 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
901 emac_print_link_status(dev);
902 } else
903 netif_carrier_on(dev->ndev);
904
905 emac_configure(dev);
906 mal_poll_add(dev->mal, &dev->commac);
907 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
908 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
909 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
910 emac_tx_enable(dev);
911 emac_rx_enable(dev);
912 netif_start_queue(ndev);
913 local_bh_enable();
1da177e4 914
37448f7d
ES
915 return 0;
916 oom:
917 emac_clean_rx_ring(dev);
918 free_irq(dev->def->irq, dev);
919 return -ENOMEM;
1da177e4
LT
920}
921
37448f7d
ES
922/* BHs disabled */
923static int emac_link_differs(struct ocp_enet_private *dev)
1da177e4 924{
37448f7d 925 u32 r = in_be32(&dev->emacp->mr1);
1da177e4 926
37448f7d
ES
927 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
928 int speed, pause, asym_pause;
1da177e4 929
37448f7d
ES
930 if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
931 speed = SPEED_1000;
932 else if (r & EMAC_MR1_MF_100)
933 speed = SPEED_100;
934 else
935 speed = SPEED_10;
1da177e4 936
37448f7d
ES
937 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
938 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
939 pause = 1;
940 asym_pause = 0;
941 break;
942 case EMAC_MR1_APP:
943 pause = 0;
944 asym_pause = 1;
945 break;
946 default:
947 pause = asym_pause = 0;
1da177e4 948 }
37448f7d
ES
949 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
950 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
1da177e4
LT
951}
952
37448f7d
ES
953/* BHs disabled */
954static void emac_link_timer(unsigned long data)
1da177e4 955{
37448f7d
ES
956 struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
957 int link_poll_interval;
1da177e4 958
37448f7d 959 DBG2("%d: link timer" NL, dev->def->index);
1da177e4 960
37448f7d
ES
961 if (dev->phy.def->ops->poll_link(&dev->phy)) {
962 if (!netif_carrier_ok(dev->ndev)) {
963 EMAC_RX_CLK_DEFAULT(dev->def->index);
1da177e4 964
37448f7d
ES
965 /* Get new link parameters */
966 dev->phy.def->ops->read_link(&dev->phy);
1da177e4 967
37448f7d
ES
968 if (dev->tah_dev || emac_link_differs(dev))
969 emac_full_tx_reset(dev->ndev);
1da177e4 970
37448f7d
ES
971 netif_carrier_on(dev->ndev);
972 emac_print_link_status(dev);
973 }
974 link_poll_interval = PHY_POLL_LINK_ON;
975 } else {
976 if (netif_carrier_ok(dev->ndev)) {
977 EMAC_RX_CLK_TX(dev->def->index);
978#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
979 emac_reinitialize(dev);
980#endif
981 netif_carrier_off(dev->ndev);
982 emac_print_link_status(dev);
1da177e4 983 }
1da177e4 984
37448f7d
ES
985 /* Retry reset if the previous attempt failed.
986 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
987 * case, but I left it here because it shouldn't trigger for
988 * sane PHYs anyway.
989 */
990 if (unlikely(dev->reset_failed))
991 emac_reinitialize(dev);
1da177e4 992
37448f7d 993 link_poll_interval = PHY_POLL_LINK_OFF;
1da177e4 994 }
37448f7d
ES
995 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
996}
1da177e4 997
37448f7d
ES
998/* BHs disabled */
999static void emac_force_link_update(struct ocp_enet_private *dev)
1000{
1001 netif_carrier_off(dev->ndev);
1002 if (timer_pending(&dev->link_timer))
1003 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
1004}
1da177e4 1005
37448f7d
ES
1006/* Process ctx, rtnl_lock semaphore */
1007static int emac_close(struct net_device *ndev)
1008{
1009 struct ocp_enet_private *dev = ndev->priv;
1010 struct ocp_func_emac_data *emacdata = dev->def->additions;
1da177e4 1011
37448f7d 1012 DBG("%d: close" NL, dev->def->index);
1da177e4 1013
37448f7d 1014 local_bh_disable();
1da177e4 1015
37448f7d
ES
1016 if (dev->phy.address >= 0)
1017 del_timer_sync(&dev->link_timer);
1da177e4 1018
37448f7d
ES
1019 netif_stop_queue(ndev);
1020 emac_rx_disable(dev);
1021 emac_tx_disable(dev);
1022 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1023 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
1024 mal_poll_del(dev->mal, &dev->commac);
1025 local_bh_enable();
1da177e4 1026
37448f7d
ES
1027 emac_clean_tx_ring(dev);
1028 emac_clean_rx_ring(dev);
1029 free_irq(dev->def->irq, dev);
1da177e4
LT
1030
1031 return 0;
1032}
1033
37448f7d
ES
1034static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
1035 struct sk_buff *skb)
1da177e4 1036{
37448f7d 1037#if defined(CONFIG_IBM_EMAC_TAH)
84fa7933 1038 if (skb->ip_summed == CHECKSUM_PARTIAL) {
37448f7d
ES
1039 ++dev->stats.tx_packets_csum;
1040 return EMAC_TX_CTRL_TAH_CSUM;
1da177e4 1041 }
37448f7d
ES
1042#endif
1043 return 0;
1044}
1da177e4 1045
37448f7d
ES
1046static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
1047{
b43de2d8 1048 struct emac_regs __iomem *p = dev->emacp;
37448f7d 1049 struct net_device *ndev = dev->ndev;
1da177e4 1050
37448f7d
ES
1051 /* Send the packet out */
1052 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1da177e4 1053
37448f7d
ES
1054 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1055 netif_stop_queue(ndev);
1056 DBG2("%d: stopped TX queue" NL, dev->def->index);
1057 }
1da177e4 1058
37448f7d
ES
1059 ndev->trans_start = jiffies;
1060 ++dev->stats.tx_packets;
1061 dev->stats.tx_bytes += len;
1da177e4
LT
1062
1063 return 0;
1064}
1065
37448f7d
ES
1066/* BHs disabled */
1067static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1da177e4 1068{
37448f7d
ES
1069 struct ocp_enet_private *dev = ndev->priv;
1070 unsigned int len = skb->len;
1071 int slot;
1da177e4 1072
37448f7d
ES
1073 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1074 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1da177e4 1075
37448f7d
ES
1076 slot = dev->tx_slot++;
1077 if (dev->tx_slot == NUM_TX_BUFF) {
1078 dev->tx_slot = 0;
1079 ctrl |= MAL_TX_CTRL_WRAP;
1080 }
1da177e4 1081
37448f7d 1082 DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);
1da177e4 1083
37448f7d
ES
1084 dev->tx_skb[slot] = skb;
1085 dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
1086 DMA_TO_DEVICE);
1087 dev->tx_desc[slot].data_len = (u16) len;
1088 barrier();
1089 dev->tx_desc[slot].ctrl = ctrl;
1da177e4 1090
37448f7d 1091 return emac_xmit_finish(dev, len);
1da177e4
LT
1092}
1093
37448f7d
ES
1094#if defined(CONFIG_IBM_EMAC_TAH)
1095static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
1096 u32 pd, int len, int last, u16 base_ctrl)
1da177e4 1097{
37448f7d
ES
1098 while (1) {
1099 u16 ctrl = base_ctrl;
1100 int chunk = min(len, MAL_MAX_TX_SIZE);
1101 len -= chunk;
1da177e4 1102
37448f7d 1103 slot = (slot + 1) % NUM_TX_BUFF;
1da177e4 1104
37448f7d
ES
1105 if (last && !len)
1106 ctrl |= MAL_TX_CTRL_LAST;
1107 if (slot == NUM_TX_BUFF - 1)
1108 ctrl |= MAL_TX_CTRL_WRAP;
1da177e4 1109
37448f7d
ES
1110 dev->tx_skb[slot] = NULL;
1111 dev->tx_desc[slot].data_ptr = pd;
1112 dev->tx_desc[slot].data_len = (u16) chunk;
1113 dev->tx_desc[slot].ctrl = ctrl;
1114 ++dev->tx_cnt;
1da177e4 1115
37448f7d
ES
1116 if (!len)
1117 break;
1da177e4 1118
37448f7d 1119 pd += chunk;
1da177e4 1120 }
37448f7d 1121 return slot;
1da177e4
LT
1122}
1123
37448f7d
ES
1124/* BHs disabled (SG version for TAH equipped EMACs) */
1125static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1da177e4 1126{
37448f7d
ES
1127 struct ocp_enet_private *dev = ndev->priv;
1128 int nr_frags = skb_shinfo(skb)->nr_frags;
1129 int len = skb->len, chunk;
1130 int slot, i;
1131 u16 ctrl;
1132 u32 pd;
1da177e4 1133
37448f7d
ES
1134 /* This is common "fast" path */
1135 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1136 return emac_start_xmit(skb, ndev);
1da177e4 1137
37448f7d 1138 len -= skb->data_len;
1da177e4 1139
37448f7d
ES
1140 /* Note, this is only an *estimation*, we can still run out of empty
1141 * slots because of the additional fragmentation into
1142 * MAL_MAX_TX_SIZE-sized chunks
1143 */
1144 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1145 goto stop_queue;
1146
1147 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1148 emac_tx_csum(dev, skb);
1149 slot = dev->tx_slot;
1150
1151 /* skb data */
1152 dev->tx_skb[slot] = NULL;
1153 chunk = min(len, MAL_MAX_TX_SIZE);
1154 dev->tx_desc[slot].data_ptr = pd =
1155 dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
1156 dev->tx_desc[slot].data_len = (u16) chunk;
1157 len -= chunk;
1158 if (unlikely(len))
1159 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1160 ctrl);
1161 /* skb fragments */
1162 for (i = 0; i < nr_frags; ++i) {
1163 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1164 len = frag->size;
1da177e4 1165
37448f7d
ES
1166 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1167 goto undo_frame;
1da177e4 1168
37448f7d
ES
1169 pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
1170 DMA_TO_DEVICE);
1da177e4 1171
37448f7d
ES
1172 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1173 ctrl);
1da177e4
LT
1174 }
1175
37448f7d
ES
1176 DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
1177 dev->tx_slot, slot);
1da177e4 1178
37448f7d
ES
1179 /* Attach skb to the last slot so we don't release it too early */
1180 dev->tx_skb[slot] = skb;
1da177e4 1181
37448f7d
ES
1182 /* Send the packet out */
1183 if (dev->tx_slot == NUM_TX_BUFF - 1)
1184 ctrl |= MAL_TX_CTRL_WRAP;
1185 barrier();
1186 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1187 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1da177e4 1188
37448f7d 1189 return emac_xmit_finish(dev, skb->len);
1da177e4 1190
37448f7d
ES
1191 undo_frame:
1192 /* Well, too bad. Our previous estimation was overly optimistic.
1193 * Undo everything.
1194 */
1195 while (slot != dev->tx_slot) {
1196 dev->tx_desc[slot].ctrl = 0;
1197 --dev->tx_cnt;
1198 if (--slot < 0)
1199 slot = NUM_TX_BUFF - 1;
1200 }
1201 ++dev->estats.tx_undo;
1202
1203 stop_queue:
1204 netif_stop_queue(ndev);
1205 DBG2("%d: stopped TX queue" NL, dev->def->index);
1206 return 1;
1207}
1208#else
1209# define emac_start_xmit_sg emac_start_xmit
1210#endif /* !defined(CONFIG_IBM_EMAC_TAH) */
1211
1212/* BHs disabled */
1213static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1214{
1215 struct ibm_emac_error_stats *st = &dev->estats;
1216 DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1217
1218 ++st->tx_bd_errors;
1219 if (ctrl & EMAC_TX_ST_BFCS)
1220 ++st->tx_bd_bad_fcs;
1221 if (ctrl & EMAC_TX_ST_LCS)
1222 ++st->tx_bd_carrier_loss;
1223 if (ctrl & EMAC_TX_ST_ED)
1224 ++st->tx_bd_excessive_deferral;
1225 if (ctrl & EMAC_TX_ST_EC)
1226 ++st->tx_bd_excessive_collisions;
1227 if (ctrl & EMAC_TX_ST_LC)
1228 ++st->tx_bd_late_collision;
1229 if (ctrl & EMAC_TX_ST_MC)
1230 ++st->tx_bd_multple_collisions;
1231 if (ctrl & EMAC_TX_ST_SC)
1232 ++st->tx_bd_single_collision;
1233 if (ctrl & EMAC_TX_ST_UR)
1234 ++st->tx_bd_underrun;
1235 if (ctrl & EMAC_TX_ST_SQE)
1236 ++st->tx_bd_sqe;
1da177e4
LT
1237}
1238
37448f7d 1239static void emac_poll_tx(void *param)
1da177e4 1240{
37448f7d
ES
1241 struct ocp_enet_private *dev = param;
1242 DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
1243 dev->ack_slot);
1244
1245 if (dev->tx_cnt) {
1246 u16 ctrl;
1247 int slot = dev->ack_slot, n = 0;
1248 again:
1249 ctrl = dev->tx_desc[slot].ctrl;
1250 if (!(ctrl & MAL_TX_CTRL_READY)) {
1251 struct sk_buff *skb = dev->tx_skb[slot];
1252 ++n;
1253
1254 if (skb) {
1255 dev_kfree_skb(skb);
1256 dev->tx_skb[slot] = NULL;
1257 }
1258 slot = (slot + 1) % NUM_TX_BUFF;
1da177e4 1259
37448f7d
ES
1260 if (unlikely(EMAC_IS_BAD_TX(ctrl)))
1261 emac_parse_tx_error(dev, ctrl);
1da177e4 1262
37448f7d
ES
1263 if (--dev->tx_cnt)
1264 goto again;
1da177e4 1265 }
37448f7d
ES
1266 if (n) {
1267 dev->ack_slot = slot;
1268 if (netif_queue_stopped(dev->ndev) &&
1269 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1270 netif_wake_queue(dev->ndev);
1da177e4 1271
37448f7d
ES
1272 DBG2("%d: tx %d pkts" NL, dev->def->index, n);
1273 }
1274 }
1da177e4
LT
1275}
1276
37448f7d
ES
1277static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
1278 int len)
1da177e4 1279{
37448f7d
ES
1280 struct sk_buff *skb = dev->rx_skb[slot];
1281 DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
1da177e4 1282
37448f7d
ES
1283 if (len)
1284 dma_map_single(dev->ldev, skb->data - 2,
1285 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1da177e4 1286
37448f7d
ES
1287 dev->rx_desc[slot].data_len = 0;
1288 barrier();
1289 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1290 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1291}
1292
1293static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1294{
1295 struct ibm_emac_error_stats *st = &dev->estats;
1296 DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1297
1298 ++st->rx_bd_errors;
1299 if (ctrl & EMAC_RX_ST_OE)
1300 ++st->rx_bd_overrun;
1301 if (ctrl & EMAC_RX_ST_BP)
1302 ++st->rx_bd_bad_packet;
1303 if (ctrl & EMAC_RX_ST_RP)
1304 ++st->rx_bd_runt_packet;
1305 if (ctrl & EMAC_RX_ST_SE)
1306 ++st->rx_bd_short_event;
1307 if (ctrl & EMAC_RX_ST_AE)
1308 ++st->rx_bd_alignment_error;
1309 if (ctrl & EMAC_RX_ST_BFCS)
1310 ++st->rx_bd_bad_fcs;
1311 if (ctrl & EMAC_RX_ST_PTL)
1312 ++st->rx_bd_packet_too_long;
1313 if (ctrl & EMAC_RX_ST_ORE)
1314 ++st->rx_bd_out_of_range;
1315 if (ctrl & EMAC_RX_ST_IRE)
1316 ++st->rx_bd_in_range;
1317}
1318
1319static inline void emac_rx_csum(struct ocp_enet_private *dev,
1320 struct sk_buff *skb, u16 ctrl)
1321{
1322#if defined(CONFIG_IBM_EMAC_TAH)
1323 if (!ctrl && dev->tah_dev) {
1324 skb->ip_summed = CHECKSUM_UNNECESSARY;
1325 ++dev->stats.rx_packets_csum;
1326 }
1327#endif
1328}
1da177e4 1329
37448f7d
ES
1330static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1331{
1332 if (likely(dev->rx_sg_skb != NULL)) {
1333 int len = dev->rx_desc[slot].data_len;
1334 int tot_len = dev->rx_sg_skb->len + len;
1335
1336 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1337 ++dev->estats.rx_dropped_mtu;
1338 dev_kfree_skb(dev->rx_sg_skb);
1339 dev->rx_sg_skb = NULL;
1340 } else {
27a884dc 1341 cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
37448f7d
ES
1342 dev->rx_skb[slot]->data, len);
1343 skb_put(dev->rx_sg_skb, len);
1344 emac_recycle_rx_skb(dev, slot, len);
1345 return 0;
1da177e4
LT
1346 }
1347 }
37448f7d
ES
1348 emac_recycle_rx_skb(dev, slot, 0);
1349 return -1;
1350}
1da177e4 1351
37448f7d
ES
1352/* BHs disabled */
1353static int emac_poll_rx(void *param, int budget)
1354{
1355 struct ocp_enet_private *dev = param;
1356 int slot = dev->rx_slot, received = 0;
1357
1358 DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
1da177e4 1359
37448f7d
ES
1360 again:
1361 while (budget > 0) {
1362 int len;
1363 struct sk_buff *skb;
1364 u16 ctrl = dev->rx_desc[slot].ctrl;
1365
1366 if (ctrl & MAL_RX_CTRL_EMPTY)
1367 break;
1da177e4 1368
37448f7d
ES
1369 skb = dev->rx_skb[slot];
1370 barrier();
1371 len = dev->rx_desc[slot].data_len;
1da177e4 1372
37448f7d
ES
1373 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1374 goto sg;
1da177e4 1375
37448f7d
ES
1376 ctrl &= EMAC_BAD_RX_MASK;
1377 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1378 emac_parse_rx_error(dev, ctrl);
1379 ++dev->estats.rx_dropped_error;
1380 emac_recycle_rx_skb(dev, slot, 0);
1381 len = 0;
1382 goto next;
1383 }
1da177e4 1384
37448f7d
ES
1385 if (len && len < EMAC_RX_COPY_THRESH) {
1386 struct sk_buff *copy_skb =
1387 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1388 if (unlikely(!copy_skb))
1389 goto oom;
1390
1391 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1392 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1393 len + 2);
1394 emac_recycle_rx_skb(dev, slot, len);
1395 skb = copy_skb;
1396 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1397 goto oom;
1398
1399 skb_put(skb, len);
1400 push_packet:
37448f7d
ES
1401 skb->protocol = eth_type_trans(skb, dev->ndev);
1402 emac_rx_csum(dev, skb, ctrl);
1403
1404 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1405 ++dev->estats.rx_dropped_stack;
1406 next:
1407 ++dev->stats.rx_packets;
1408 skip:
1409 dev->stats.rx_bytes += len;
1410 slot = (slot + 1) % NUM_RX_BUFF;
1411 --budget;
1412 ++received;
1413 continue;
1414 sg:
1415 if (ctrl & MAL_RX_CTRL_FIRST) {
1416 BUG_ON(dev->rx_sg_skb);
1417 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1418 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1419 ++dev->estats.rx_dropped_oom;
1420 emac_recycle_rx_skb(dev, slot, 0);
1421 } else {
1422 dev->rx_sg_skb = skb;
1423 skb_put(skb, len);
1424 }
1425 } else if (!emac_rx_sg_append(dev, slot) &&
1426 (ctrl & MAL_RX_CTRL_LAST)) {
1427
1428 skb = dev->rx_sg_skb;
1429 dev->rx_sg_skb = NULL;
1430
1431 ctrl &= EMAC_BAD_RX_MASK;
1432 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1433 emac_parse_rx_error(dev, ctrl);
1434 ++dev->estats.rx_dropped_error;
1435 dev_kfree_skb(skb);
1436 len = 0;
1437 } else
1438 goto push_packet;
1439 }
1440 goto skip;
1441 oom:
1442 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1443 /* Drop the packet and recycle skb */
1444 ++dev->estats.rx_dropped_oom;
1445 emac_recycle_rx_skb(dev, slot, 0);
1446 goto next;
1447 }
1da177e4 1448
37448f7d
ES
1449 if (received) {
1450 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
1451 dev->rx_slot = slot;
1452 }
1da177e4 1453
37448f7d
ES
1454 if (unlikely(budget && dev->commac.rx_stopped)) {
1455 struct ocp_func_emac_data *emacdata = dev->def->additions;
1da177e4 1456
37448f7d
ES
1457 barrier();
1458 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1459 DBG2("%d: rx restart" NL, dev->def->index);
1460 received = 0;
1461 goto again;
1462 }
1da177e4 1463
37448f7d
ES
1464 if (dev->rx_sg_skb) {
1465 DBG2("%d: dropping partial rx packet" NL,
1466 dev->def->index);
1467 ++dev->estats.rx_dropped_error;
1468 dev_kfree_skb(dev->rx_sg_skb);
1469 dev->rx_sg_skb = NULL;
1470 }
1da177e4 1471
37448f7d
ES
1472 dev->commac.rx_stopped = 0;
1473 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1474 emac_rx_enable(dev);
1475 dev->rx_slot = 0;
1476 }
1477 return received;
1da177e4
LT
1478}
1479
37448f7d
ES
1480/* BHs disabled */
1481static int emac_peek_rx(void *param)
1da177e4 1482{
37448f7d
ES
1483 struct ocp_enet_private *dev = param;
1484 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1485}
1da177e4 1486
37448f7d
ES
1487/* BHs disabled */
1488static int emac_peek_rx_sg(void *param)
1489{
1490 struct ocp_enet_private *dev = param;
1491 int slot = dev->rx_slot;
1492 while (1) {
1493 u16 ctrl = dev->rx_desc[slot].ctrl;
1494 if (ctrl & MAL_RX_CTRL_EMPTY)
1495 return 0;
1496 else if (ctrl & MAL_RX_CTRL_LAST)
1497 return 1;
1da177e4 1498
37448f7d 1499 slot = (slot + 1) % NUM_RX_BUFF;
1da177e4 1500
37448f7d
ES
1501 /* I'm just being paranoid here :) */
1502 if (unlikely(slot == dev->rx_slot))
1503 return 0;
1504 }
1da177e4
LT
1505}
1506
37448f7d
ES
1507/* Hard IRQ */
1508static void emac_rxde(void *param)
1da177e4 1509{
37448f7d
ES
1510 struct ocp_enet_private *dev = param;
1511 ++dev->estats.rx_stopped;
1512 emac_rx_disable_async(dev);
1513}
1da177e4 1514
37448f7d 1515/* Hard IRQ */
7d12e780 1516static irqreturn_t emac_irq(int irq, void *dev_instance)
37448f7d
ES
1517{
1518 struct ocp_enet_private *dev = dev_instance;
b43de2d8 1519 struct emac_regs __iomem *p = dev->emacp;
37448f7d
ES
1520 struct ibm_emac_error_stats *st = &dev->estats;
1521
1522 u32 isr = in_be32(&p->isr);
1523 out_be32(&p->isr, isr);
1524
1525 DBG("%d: isr = %08x" NL, dev->def->index, isr);
1526
1527 if (isr & EMAC_ISR_TXPE)
1528 ++st->tx_parity;
1529 if (isr & EMAC_ISR_RXPE)
1530 ++st->rx_parity;
1531 if (isr & EMAC_ISR_TXUE)
1532 ++st->tx_underrun;
1533 if (isr & EMAC_ISR_RXOE)
1534 ++st->rx_fifo_overrun;
1535 if (isr & EMAC_ISR_OVR)
1536 ++st->rx_overrun;
1537 if (isr & EMAC_ISR_BP)
1538 ++st->rx_bad_packet;
1539 if (isr & EMAC_ISR_RP)
1540 ++st->rx_runt_packet;
1541 if (isr & EMAC_ISR_SE)
1542 ++st->rx_short_event;
1543 if (isr & EMAC_ISR_ALE)
1544 ++st->rx_alignment_error;
1545 if (isr & EMAC_ISR_BFCS)
1546 ++st->rx_bad_fcs;
1547 if (isr & EMAC_ISR_PTLE)
1548 ++st->rx_packet_too_long;
1549 if (isr & EMAC_ISR_ORE)
1550 ++st->rx_out_of_range;
1551 if (isr & EMAC_ISR_IRE)
1552 ++st->rx_in_range;
1553 if (isr & EMAC_ISR_SQE)
1554 ++st->tx_sqe;
1555 if (isr & EMAC_ISR_TE)
1556 ++st->tx_errors;
1da177e4 1557
37448f7d
ES
1558 return IRQ_HANDLED;
1559}
1da177e4 1560
37448f7d
ES
1561static struct net_device_stats *emac_stats(struct net_device *ndev)
1562{
1563 struct ocp_enet_private *dev = ndev->priv;
1564 struct ibm_emac_stats *st = &dev->stats;
1565 struct ibm_emac_error_stats *est = &dev->estats;
1566 struct net_device_stats *nst = &dev->nstats;
1567
1568 DBG2("%d: stats" NL, dev->def->index);
1569
1570 /* Compute "legacy" statistics */
1571 local_irq_disable();
1572 nst->rx_packets = (unsigned long)st->rx_packets;
1573 nst->rx_bytes = (unsigned long)st->rx_bytes;
1574 nst->tx_packets = (unsigned long)st->tx_packets;
1575 nst->tx_bytes = (unsigned long)st->tx_bytes;
1576 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1577 est->rx_dropped_error +
1578 est->rx_dropped_resize +
1579 est->rx_dropped_mtu);
1580 nst->tx_dropped = (unsigned long)est->tx_dropped;
1581
1582 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1583 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1584 est->rx_fifo_overrun +
1585 est->rx_overrun);
1586 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1587 est->rx_alignment_error);
1588 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1589 est->rx_bad_fcs);
1590 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1591 est->rx_bd_short_event +
1592 est->rx_bd_packet_too_long +
1593 est->rx_bd_out_of_range +
1594 est->rx_bd_in_range +
1595 est->rx_runt_packet +
1596 est->rx_short_event +
1597 est->rx_packet_too_long +
1598 est->rx_out_of_range +
1599 est->rx_in_range);
1600
1601 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1602 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1603 est->tx_underrun);
1604 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1605 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1606 est->tx_bd_excessive_collisions +
1607 est->tx_bd_late_collision +
1608 est->tx_bd_multple_collisions);
1609 local_irq_enable();
1610 return nst;
1da177e4
LT
1611}
1612
37448f7d 1613static void emac_remove(struct ocp_device *ocpdev)
1da177e4 1614{
37448f7d 1615 struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1da177e4 1616
37448f7d 1617 DBG("%d: remove" NL, dev->def->index);
1da177e4 1618
b43de2d8 1619 ocp_set_drvdata(ocpdev, NULL);
37448f7d 1620 unregister_netdev(dev->ndev);
1da177e4 1621
37448f7d
ES
1622 tah_fini(dev->tah_dev);
1623 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1624 zmii_fini(dev->zmii_dev, dev->zmii_input);
1da177e4 1625
b43de2d8 1626 emac_dbg_register(dev->def->index, NULL);
37448f7d
ES
1627
1628 mal_unregister_commac(dev->mal, &dev->commac);
b43de2d8 1629 iounmap(dev->emacp);
37448f7d 1630 kfree(dev->ndev);
1da177e4
LT
1631}
1632
37448f7d
ES
1633static struct mal_commac_ops emac_commac_ops = {
1634 .poll_tx = &emac_poll_tx,
1635 .poll_rx = &emac_poll_rx,
1636 .peek_rx = &emac_peek_rx,
1637 .rxde = &emac_rxde,
1638};
1da177e4 1639
37448f7d
ES
1640static struct mal_commac_ops emac_commac_sg_ops = {
1641 .poll_tx = &emac_poll_tx,
1642 .poll_rx = &emac_poll_rx,
1643 .peek_rx = &emac_peek_rx_sg,
1644 .rxde = &emac_rxde,
1645};
1da177e4 1646
37448f7d
ES
1647/* Ethtool support */
1648static int emac_ethtool_get_settings(struct net_device *ndev,
1649 struct ethtool_cmd *cmd)
1da177e4 1650{
37448f7d 1651 struct ocp_enet_private *dev = ndev->priv;
1da177e4 1652
37448f7d 1653 cmd->supported = dev->phy.features;
1da177e4 1654 cmd->port = PORT_MII;
37448f7d
ES
1655 cmd->phy_address = dev->phy.address;
1656 cmd->transceiver =
1657 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1658
1659 local_bh_disable();
1660 cmd->advertising = dev->phy.advertising;
1661 cmd->autoneg = dev->phy.autoneg;
1662 cmd->speed = dev->phy.speed;
1663 cmd->duplex = dev->phy.duplex;
1664 local_bh_enable();
1665
1da177e4
LT
1666 return 0;
1667}
1668
37448f7d
ES
1669static int emac_ethtool_set_settings(struct net_device *ndev,
1670 struct ethtool_cmd *cmd)
1da177e4 1671{
37448f7d
ES
1672 struct ocp_enet_private *dev = ndev->priv;
1673 u32 f = dev->phy.features;
1da177e4 1674
37448f7d
ES
1675 DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1676 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1da177e4 1677
37448f7d
ES
1678 /* Basic sanity checks */
1679 if (dev->phy.address < 0)
1680 return -EOPNOTSUPP;
1da177e4
LT
1681 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1682 return -EINVAL;
1683 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1684 return -EINVAL;
1685 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1686 return -EINVAL;
37448f7d
ES
1687
1688 if (cmd->autoneg == AUTONEG_DISABLE) {
1da177e4
LT
1689 switch (cmd->speed) {
1690 case SPEED_10:
37448f7d
ES
1691 if (cmd->duplex == DUPLEX_HALF
1692 && !(f & SUPPORTED_10baseT_Half))
1da177e4 1693 return -EINVAL;
37448f7d
ES
1694 if (cmd->duplex == DUPLEX_FULL
1695 && !(f & SUPPORTED_10baseT_Full))
1da177e4
LT
1696 return -EINVAL;
1697 break;
1698 case SPEED_100:
37448f7d
ES
1699 if (cmd->duplex == DUPLEX_HALF
1700 && !(f & SUPPORTED_100baseT_Half))
1da177e4 1701 return -EINVAL;
37448f7d
ES
1702 if (cmd->duplex == DUPLEX_FULL
1703 && !(f & SUPPORTED_100baseT_Full))
1da177e4
LT
1704 return -EINVAL;
1705 break;
1706 case SPEED_1000:
37448f7d
ES
1707 if (cmd->duplex == DUPLEX_HALF
1708 && !(f & SUPPORTED_1000baseT_Half))
1da177e4 1709 return -EINVAL;
37448f7d
ES
1710 if (cmd->duplex == DUPLEX_FULL
1711 && !(f & SUPPORTED_1000baseT_Full))
1da177e4
LT
1712 return -EINVAL;
1713 break;
1714 default:
1715 return -EINVAL;
37448f7d
ES
1716 }
1717
1718 local_bh_disable();
1719 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1720 cmd->duplex);
1721
1722 } else {
1723 if (!(f & SUPPORTED_Autoneg))
1724 return -EINVAL;
1725
1726 local_bh_disable();
1727 dev->phy.def->ops->setup_aneg(&dev->phy,
1728 (cmd->advertising & f) |
1729 (dev->phy.advertising &
1730 (ADVERTISED_Pause |
1731 ADVERTISED_Asym_Pause)));
1732 }
1733 emac_force_link_update(dev);
1734 local_bh_enable();
1735
1da177e4
LT
1736 return 0;
1737}
1738
37448f7d
ES
1739static void emac_ethtool_get_ringparam(struct net_device *ndev,
1740 struct ethtool_ringparam *rp)
1da177e4 1741{
37448f7d
ES
1742 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1743 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1da177e4
LT
1744}
1745
37448f7d
ES
1746static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1747 struct ethtool_pauseparam *pp)
1da177e4 1748{
37448f7d
ES
1749 struct ocp_enet_private *dev = ndev->priv;
1750
1751 local_bh_disable();
1752 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1753 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1754 pp->autoneg = 1;
1755
1756 if (dev->phy.duplex == DUPLEX_FULL) {
1757 if (dev->phy.pause)
1758 pp->rx_pause = pp->tx_pause = 1;
1759 else if (dev->phy.asym_pause)
1760 pp->tx_pause = 1;
1761 }
1762 local_bh_enable();
1763}
1da177e4 1764
37448f7d
ES
1765static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1766{
1767 struct ocp_enet_private *dev = ndev->priv;
1768 return dev->tah_dev != 0;
1da177e4
LT
1769}
1770
37448f7d 1771static int emac_get_regs_len(struct ocp_enet_private *dev)
1da177e4 1772{
37448f7d 1773 return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1da177e4
LT
1774}
1775
37448f7d
ES
1776static int emac_ethtool_get_regs_len(struct net_device *ndev)
1777{
1778 struct ocp_enet_private *dev = ndev->priv;
1779 return sizeof(struct emac_ethtool_regs_hdr) +
1780 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1781 zmii_get_regs_len(dev->zmii_dev) +
1782 rgmii_get_regs_len(dev->rgmii_dev) +
1783 tah_get_regs_len(dev->tah_dev);
1784}
1da177e4 1785
37448f7d 1786static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1da177e4 1787{
37448f7d 1788 struct emac_ethtool_regs_subhdr *hdr = buf;
1da177e4 1789
37448f7d
ES
1790 hdr->version = EMAC_ETHTOOL_REGS_VER;
1791 hdr->index = dev->def->index;
1792 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1793 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1794}
1da177e4 1795
37448f7d
ES
1796static void emac_ethtool_get_regs(struct net_device *ndev,
1797 struct ethtool_regs *regs, void *buf)
1798{
1799 struct ocp_enet_private *dev = ndev->priv;
1800 struct emac_ethtool_regs_hdr *hdr = buf;
1801
1802 hdr->components = 0;
1803 buf = hdr + 1;
1804
1805 local_irq_disable();
1806 buf = mal_dump_regs(dev->mal, buf);
1807 buf = emac_dump_regs(dev, buf);
1808 if (dev->zmii_dev) {
1809 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1810 buf = zmii_dump_regs(dev->zmii_dev, buf);
1811 }
1812 if (dev->rgmii_dev) {
1813 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1814 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1da177e4 1815 }
37448f7d
ES
1816 if (dev->tah_dev) {
1817 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1818 buf = tah_dump_regs(dev->tah_dev, buf);
1819 }
1820 local_irq_enable();
1da177e4
LT
1821}
1822
37448f7d 1823static int emac_ethtool_nway_reset(struct net_device *ndev)
1da177e4 1824{
37448f7d
ES
1825 struct ocp_enet_private *dev = ndev->priv;
1826 int res = 0;
1da177e4 1827
37448f7d 1828 DBG("%d: nway_reset" NL, dev->def->index);
1da177e4 1829
37448f7d
ES
1830 if (dev->phy.address < 0)
1831 return -EOPNOTSUPP;
1da177e4 1832
37448f7d
ES
1833 local_bh_disable();
1834 if (!dev->phy.autoneg) {
1835 res = -EINVAL;
1836 goto out;
1837 }
1da177e4 1838
37448f7d
ES
1839 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1840 emac_force_link_update(dev);
1da177e4 1841
37448f7d
ES
1842 out:
1843 local_bh_enable();
1844 return res;
1845}
1da177e4 1846
37448f7d
ES
1847static int emac_ethtool_get_stats_count(struct net_device *ndev)
1848{
1849 return EMAC_ETHTOOL_STATS_COUNT;
1da177e4
LT
1850}
1851
37448f7d
ES
1852static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1853 u8 * buf)
1da177e4 1854{
37448f7d
ES
1855 if (stringset == ETH_SS_STATS)
1856 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1857}
1da177e4 1858
37448f7d
ES
1859static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1860 struct ethtool_stats *estats,
1861 u64 * tmp_stats)
1862{
1863 struct ocp_enet_private *dev = ndev->priv;
1864 local_irq_disable();
1865 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1866 tmp_stats += sizeof(dev->stats) / sizeof(u64);
1867 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1868 local_irq_enable();
1869}
1da177e4 1870
37448f7d
ES
1871static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1872 struct ethtool_drvinfo *info)
1873{
1874 struct ocp_enet_private *dev = ndev->priv;
1da177e4 1875
37448f7d
ES
1876 strcpy(info->driver, "ibm_emac");
1877 strcpy(info->version, DRV_VERSION);
1878 info->fw_version[0] = '\0';
1879 sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1880 info->n_stats = emac_ethtool_get_stats_count(ndev);
1881 info->regdump_len = emac_ethtool_get_regs_len(ndev);
1882}
1da177e4 1883
7282d491 1884static const struct ethtool_ops emac_ethtool_ops = {
37448f7d
ES
1885 .get_settings = emac_ethtool_get_settings,
1886 .set_settings = emac_ethtool_set_settings,
1887 .get_drvinfo = emac_ethtool_get_drvinfo,
1da177e4 1888
37448f7d
ES
1889 .get_regs_len = emac_ethtool_get_regs_len,
1890 .get_regs = emac_ethtool_get_regs,
1da177e4 1891
37448f7d 1892 .nway_reset = emac_ethtool_nway_reset,
1da177e4 1893
37448f7d
ES
1894 .get_ringparam = emac_ethtool_get_ringparam,
1895 .get_pauseparam = emac_ethtool_get_pauseparam,
1896
1897 .get_rx_csum = emac_ethtool_get_rx_csum,
1898
1899 .get_strings = emac_ethtool_get_strings,
1900 .get_stats_count = emac_ethtool_get_stats_count,
1901 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1902
1903 .get_link = ethtool_op_get_link,
1904 .get_tx_csum = ethtool_op_get_tx_csum,
1905 .get_sg = ethtool_op_get_sg,
1da177e4
LT
1906};
1907
37448f7d 1908static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1da177e4 1909{
37448f7d
ES
1910 struct ocp_enet_private *dev = ndev->priv;
1911 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1912
1913 DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1914
1915 if (dev->phy.address < 0)
1916 return -EOPNOTSUPP;
1917
1918 switch (cmd) {
1919 case SIOCGMIIPHY:
1920 case SIOCDEVPRIVATE:
1921 data[0] = dev->phy.address;
1922 /* Fall through */
1923 case SIOCGMIIREG:
1924 case SIOCDEVPRIVATE + 1:
1925 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1926 return 0;
1927
1928 case SIOCSMIIREG:
1929 case SIOCDEVPRIVATE + 2:
1930 if (!capable(CAP_NET_ADMIN))
1931 return -EPERM;
1932 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1933 return 0;
1934 default:
1935 return -EOPNOTSUPP;
1936 }
1da177e4 1937}
1da177e4 1938
37448f7d 1939static int __init emac_probe(struct ocp_device *ocpdev)
1da177e4 1940{
37448f7d 1941 struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1da177e4 1942 struct net_device *ndev;
37448f7d
ES
1943 struct ocp_device *maldev;
1944 struct ocp_enet_private *dev;
1945 int err, i;
1946
1947 DBG("%d: probe" NL, ocpdev->def->index);
1da177e4 1948
1da177e4
LT
1949 if (!emacdata) {
1950 printk(KERN_ERR "emac%d: Missing additional data!\n",
1951 ocpdev->def->index);
1952 return -ENODEV;
1953 }
1954
1955 /* Allocate our net_device structure */
1956 ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
37448f7d
ES
1957 if (!ndev) {
1958 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1da177e4
LT
1959 ocpdev->def->index);
1960 return -ENOMEM;
1961 }
37448f7d
ES
1962 dev = ndev->priv;
1963 dev->ndev = ndev;
1964 dev->ldev = &ocpdev->dev;
1965 dev->def = ocpdev->def;
1966 SET_MODULE_OWNER(ndev);
1da177e4 1967
37448f7d
ES
1968 /* Find MAL device we are connected to */
1969 maldev =
1970 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1971 if (!maldev) {
1972 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1973 dev->def->index, emacdata->mal_idx);
1974 err = -ENODEV;
1975 goto out;
1976 }
1977 dev->mal = ocp_get_drvdata(maldev);
1978 if (!dev->mal) {
1979 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1980 dev->def->index, emacdata->mal_idx);
1981 err = -ENODEV;
1982 goto out;
1da177e4
LT
1983 }
1984
37448f7d
ES
1985 /* Register with MAL */
1986 dev->commac.ops = &emac_commac_ops;
1987 dev->commac.dev = dev;
1988 dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1989 dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1990 err = mal_register_commac(dev->mal, &dev->commac);
1991 if (err) {
1992 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1993 dev->def->index, emacdata->mal_idx);
1994 goto out;
1995 }
1996 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1997 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1998
1999 /* Get pointers to BD rings */
2000 dev->tx_desc =
2001 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2002 emacdata->mal_tx_chan);
2003 dev->rx_desc =
2004 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2005 emacdata->mal_rx_chan);
2006
2007 DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2008 DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2009
2010 /* Clean rings */
2011 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2012 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2013
2014 /* If we depend on another EMAC for MDIO, check whether it was probed already */
2015 if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2016 struct ocp_device *mdiodev =
2017 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2018 emacdata->mdio_idx);
2019 if (!mdiodev) {
2020 printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2021 dev->def->index, emacdata->mdio_idx);
2022 err = -ENODEV;
2023 goto out2;
2024 }
2025 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2026 if (!dev->mdio_dev) {
2027 printk(KERN_ERR
2028 "emac%d: emac%d hasn't been initialized yet!\n",
2029 dev->def->index, emacdata->mdio_idx);
2030 err = -ENODEV;
2031 goto out2;
2032 }
1da177e4
LT
2033 }
2034
37448f7d
ES
2035 /* Attach to ZMII, if needed */
2036 if ((err = zmii_attach(dev)) != 0)
2037 goto out2;
2038
2039 /* Attach to RGMII, if needed */
2040 if ((err = rgmii_attach(dev)) != 0)
2041 goto out3;
2042
2043 /* Attach to TAH, if needed */
2044 if ((err = tah_attach(dev)) != 0)
2045 goto out4;
2046
2047 /* Map EMAC regs */
b43de2d8 2048 dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
37448f7d
ES
2049 if (!dev->emacp) {
2050 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2051 dev->def->index);
2052 err = -ENOMEM;
2053 goto out5;
1da177e4
LT
2054 }
2055
37448f7d
ES
2056 /* Fill in MAC address */
2057 for (i = 0; i < 6; ++i)
2058 ndev->dev_addr[i] = emacdata->mac_addr[i];
1da177e4 2059
37448f7d
ES
2060 /* Set some link defaults before we can find out real parameters */
2061 dev->phy.speed = SPEED_100;
2062 dev->phy.duplex = DUPLEX_FULL;
2063 dev->phy.autoneg = AUTONEG_DISABLE;
2064 dev->phy.pause = dev->phy.asym_pause = 0;
8169bd91 2065 dev->stop_timeout = STOP_TIMEOUT_100;
37448f7d
ES
2066 init_timer(&dev->link_timer);
2067 dev->link_timer.function = emac_link_timer;
2068 dev->link_timer.data = (unsigned long)dev;
2069
2070 /* Find PHY if any */
2071 dev->phy.dev = ndev;
2072 dev->phy.mode = emacdata->phy_mode;
2073 if (emacdata->phy_map != 0xffffffff) {
2074 u32 phy_map = emacdata->phy_map | busy_phy_map;
2075 u32 adv;
2076
2077 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2078 emacdata->phy_map, busy_phy_map);
2079
2080 EMAC_RX_CLK_TX(dev->def->index);
2081
2082 dev->phy.mdio_read = emac_mdio_read;
2083 dev->phy.mdio_write = emac_mdio_write;
2084
2085 /* Configure EMAC with defaults so we can at least use MDIO
2086 * This is needed mostly for 440GX
2087 */
2088 if (emac_phy_gpcs(dev->phy.mode)) {
2089 /* XXX
2090 * Make GPCS PHY address equal to EMAC index.
2091 * We probably should take into account busy_phy_map
2092 * and/or phy_map here.
2093 */
2094 dev->phy.address = dev->def->index;
1da177e4 2095 }
37448f7d
ES
2096
2097 emac_configure(dev);
2098
2099 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2100 if (!(phy_map & 1)) {
2101 int r;
2102 busy_phy_map |= 1 << i;
1da177e4 2103
37448f7d
ES
2104 /* Quick check if there is a PHY at the address */
2105 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2106 if (r == 0xffff || r < 0)
2107 continue;
2108 if (!mii_phy_probe(&dev->phy, i))
2109 break;
2110 }
2111 if (i == 0x20) {
2112 printk(KERN_WARNING "emac%d: can't find PHY!\n",
2113 dev->def->index);
2114 goto out6;
1da177e4 2115 }
1da177e4 2116
37448f7d
ES
2117 /* Init PHY */
2118 if (dev->phy.def->ops->init)
2119 dev->phy.def->ops->init(&dev->phy);
49a9db07 2120
37448f7d
ES
2121 /* Disable any PHY features not supported by the platform */
2122 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2123
2124 /* Setup initial link parameters */
2125 if (dev->phy.features & SUPPORTED_Autoneg) {
2126 adv = dev->phy.features;
2127#if !defined(CONFIG_40x)
2128 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2129#endif
2130 /* Restart autonegotiation */
2131 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
49a9db07 2132 } else {
37448f7d
ES
2133 u32 f = dev->phy.def->features;
2134 int speed = SPEED_10, fd = DUPLEX_HALF;
2135
2136 /* Select highest supported speed/duplex */
2137 if (f & SUPPORTED_1000baseT_Full) {
2138 speed = SPEED_1000;
2139 fd = DUPLEX_FULL;
2140 } else if (f & SUPPORTED_1000baseT_Half)
2141 speed = SPEED_1000;
2142 else if (f & SUPPORTED_100baseT_Full) {
2143 speed = SPEED_100;
2144 fd = DUPLEX_FULL;
2145 } else if (f & SUPPORTED_100baseT_Half)
2146 speed = SPEED_100;
2147 else if (f & SUPPORTED_10baseT_Full)
2148 fd = DUPLEX_FULL;
2149
2150 /* Force link parameters */
2151 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
49a9db07 2152 }
37448f7d
ES
2153 } else {
2154 emac_reset(dev);
1da177e4 2155
37448f7d
ES
2156 /* PHY-less configuration.
2157 * XXX I probably should move these settings to emacdata
2158 */
2159 dev->phy.address = -1;
2160 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2161 dev->phy.pause = 1;
2162 }
1da177e4
LT
2163
2164 /* Fill in the driver function table */
2165 ndev->open = &emac_open;
37448f7d
ES
2166 if (dev->tah_dev) {
2167 ndev->hard_start_xmit = &emac_start_xmit_sg;
2168 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2169 } else
2170 ndev->hard_start_xmit = &emac_start_xmit;
2171 ndev->tx_timeout = &emac_full_tx_reset;
2172 ndev->watchdog_timeo = 5 * HZ;
1da177e4
LT
2173 ndev->stop = &emac_close;
2174 ndev->get_stats = &emac_stats;
1da177e4
LT
2175 ndev->set_multicast_list = &emac_set_multicast_list;
2176 ndev->do_ioctl = &emac_ioctl;
37448f7d
ES
2177 if (emac_phy_supports_gige(emacdata->phy_mode)) {
2178 ndev->change_mtu = &emac_change_mtu;
2179 dev->commac.ops = &emac_commac_sg_ops;
2180 }
1da177e4 2181 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
1da177e4 2182
37448f7d
ES
2183 netif_carrier_off(ndev);
2184 netif_stop_queue(ndev);
2185
2186 err = register_netdev(ndev);
2187 if (err) {
2188 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2189 dev->def->index, err);
2190 goto out6;
2191 }
1da177e4 2192
37448f7d 2193 ocp_set_drvdata(ocpdev, dev);
1da177e4 2194
37448f7d
ES
2195 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2196 ndev->name, dev->def->index,
1da177e4
LT
2197 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2198 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1da177e4 2199
37448f7d
ES
2200 if (dev->phy.address >= 0)
2201 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2202 dev->phy.def->name, dev->phy.address);
1da177e4 2203
37448f7d 2204 emac_dbg_register(dev->def->index, dev);
1da177e4
LT
2205
2206 return 0;
37448f7d 2207 out6:
b43de2d8 2208 iounmap(dev->emacp);
37448f7d
ES
2209 out5:
2210 tah_fini(dev->tah_dev);
2211 out4:
2212 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2213 out3:
2214 zmii_fini(dev->zmii_dev, dev->zmii_input);
2215 out2:
2216 mal_unregister_commac(dev->mal, &dev->commac);
2217 out:
2218 kfree(ndev);
2219 return err;
1da177e4
LT
2220}
2221
1da177e4 2222static struct ocp_device_id emac_ids[] = {
37448f7d
ES
2223 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2224 { .vendor = OCP_VENDOR_INVALID}
1da177e4
LT
2225};
2226
2227static struct ocp_driver emac_driver = {
2228 .name = "emac",
2229 .id_table = emac_ids,
1da177e4
LT
2230 .probe = emac_probe,
2231 .remove = emac_remove,
2232};
2233
2234static int __init emac_init(void)
2235{
37448f7d
ES
2236 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2237
2238 DBG(": init" NL);
1da177e4 2239
37448f7d
ES
2240 if (mal_init())
2241 return -ENODEV;
2242
2243 EMAC_CLK_INTERNAL;
2244 if (ocp_register_driver(&emac_driver)) {
2245 EMAC_CLK_EXTERNAL;
2246 ocp_unregister_driver(&emac_driver);
2247 mal_exit();
2248 return -ENODEV;
1da177e4 2249 }
37448f7d 2250 EMAC_CLK_EXTERNAL;
1da177e4 2251
37448f7d
ES
2252 emac_init_debug();
2253 return 0;
1da177e4
LT
2254}
2255
2256static void __exit emac_exit(void)
2257{
37448f7d 2258 DBG(": exit" NL);
1da177e4 2259 ocp_unregister_driver(&emac_driver);
37448f7d
ES
2260 mal_exit();
2261 emac_fini_debug();
1da177e4
LT
2262}
2263
2264module_init(emac_init);
2265module_exit(emac_exit);