ethtool: set addr_assign_type to NET_ADDR_SET when addr is passed on create
[linux-2.6-block.git] / drivers / net / ethernet / freescale / fec.c
CommitLineData
1da177e4
LT
1/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
4 *
7dd6a2aa 5 * Right now, I am very wasteful with the buffers. I allocate memory
1da177e4
LT
6 * pages and then divide them into 2K frame buffers. This way I know I
7 * have buffers large enough to hold one frame within one buffer descriptor.
8 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
9 * will be much more memory efficient and will easily handle lots of
10 * small packets.
11 *
12 * Much better multiple PHY support by Magnus Damm.
13 * Copyright (c) 2000 Ericsson Radio Systems AB.
14 *
562d2f8c
GU
15 * Support for FEC controller of ColdFire processors.
16 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
7dd6a2aa
GU
17 *
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
677177c5 19 * Copyright (c) 2004-2006 Macq Electronique SA.
b5680e0b 20 *
230dec61 21 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
1da177e4
LT
22 */
23
1da177e4
LT
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/ptrace.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/spinlock.h>
39#include <linux/workqueue.h>
40#include <linux/bitops.h>
6f501b17
SH
41#include <linux/io.h>
42#include <linux/irq.h>
196719ec 43#include <linux/clk.h>
ead73183 44#include <linux/platform_device.h>
e6b043d5 45#include <linux/phy.h>
5eb32bd0 46#include <linux/fec.h>
ca2cc333
SG
47#include <linux/of.h>
48#include <linux/of_device.h>
49#include <linux/of_gpio.h>
50#include <linux/of_net.h>
b2bccee1 51#include <linux/pinctrl/consumer.h>
5fa9c0fe 52#include <linux/regulator/consumer.h>
1da177e4 53
080853af 54#include <asm/cacheflush.h>
196719ec 55
b5680e0b 56#ifndef CONFIG_ARM
1da177e4
LT
57#include <asm/coldfire.h>
58#include <asm/mcfsim.h>
196719ec 59#endif
6f501b17 60
1da177e4 61#include "fec.h"
1da177e4 62
085e79ed 63#if defined(CONFIG_ARM)
196719ec
SH
64#define FEC_ALIGNMENT 0xf
65#else
66#define FEC_ALIGNMENT 0x3
67#endif
68
b5680e0b
SG
69#define DRIVER_NAME "fec"
70
71/* Controller is ENET-MAC */
72#define FEC_QUIRK_ENET_MAC (1 << 0)
73/* Controller needs driver to swap frame */
74#define FEC_QUIRK_SWAP_FRAME (1 << 1)
0ca1e290
SG
75/* Controller uses gasket */
76#define FEC_QUIRK_USE_GASKET (1 << 2)
230dec61
SG
77/* Controller has GBIT support */
78#define FEC_QUIRK_HAS_GBIT (1 << 3)
ff43da86
FL
79/* Controller has extend desc buffer */
80#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
b5680e0b
SG
81
/* Platform device ID table: one entry per supported controller variant.
 * driver_data carries the FEC_QUIRK_* flags that the rest of the driver
 * tests via platform_get_device_id(fep->pdev)->driver_data.
 */
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
b5680e0b 105
/* Indices into fec_devtype[] for the device-tree match table below.
 * IMX25_FEC starts at 1 because entry 0 of fec_devtype[] is the plain
 * coldfire "fec" entry, which has no DT compatible string.
 */
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
121
49da97dc
SG
122static unsigned char macaddr[ETH_ALEN];
123module_param_array(macaddr, byte, NULL, 0);
124MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
1da177e4 125
49da97dc 126#if defined(CONFIG_M5272)
1da177e4
LT
127/*
128 * Some hardware gets it MAC address out of local flash memory.
129 * if this is non-zero then assume it is the address to get MAC from.
130 */
131#if defined(CONFIG_NETtel)
132#define FEC_FLASHMAC 0xf0006006
133#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
134#define FEC_FLASHMAC 0xf0006000
1da177e4
LT
135#elif defined(CONFIG_CANCam)
136#define FEC_FLASHMAC 0xf0020000
7dd6a2aa
GU
137#elif defined (CONFIG_M5272C3)
138#define FEC_FLASHMAC (0xffe04000 + 4)
139#elif defined(CONFIG_MOD5272)
a7dd3219 140#define FEC_FLASHMAC 0xffc0406b
1da177e4
LT
141#else
142#define FEC_FLASHMAC 0
143#endif
43be6366 144#endif /* CONFIG_M5272 */
ead73183 145
ff43da86 146#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
6b265293 147#error "FEC: descriptor ring size constants too large"
562d2f8c
GU
148#endif
149
22f6b860 150/* Interrupt events/masks. */
1da177e4
LT
151#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
152#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
153#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
154#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
155#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
156#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
157#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
158#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
159#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
160#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
161
4bee1f9a
WS
162#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
163
1da177e4
LT
164/* The FEC stores dest/src/type, data, and checksum for receive packets.
165 */
166#define PKT_MAXBUF_SIZE 1518
167#define PKT_MINBUF_SIZE 64
168#define PKT_MAXBLR_SIZE 1520
169
1da177e4 170/*
6b265293 171 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
1da177e4
LT
172 * size bits. Other FEC hardware does not, so we need to take that into
173 * account when setting it.
174 */
562d2f8c 175#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
085e79ed 176 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
1da177e4
LT
177#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
178#else
179#define OPT_FRAME_SIZE 0
180#endif
181
e6b043d5
BW
182/* FEC MII MMFR bits definition */
183#define FEC_MMFR_ST (1 << 30)
184#define FEC_MMFR_OP_READ (2 << 28)
185#define FEC_MMFR_OP_WRITE (1 << 28)
186#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
187#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
188#define FEC_MMFR_TA (2 << 16)
189#define FEC_MMFR_DATA(v) (v & 0xffff)
1da177e4 190
c3b084c2 191#define FEC_MII_TIMEOUT 30000 /* us */
1da177e4 192
22f6b860
SH
193/* Transmitter timeout */
194#define TX_TIMEOUT (2 * HZ)
1da177e4 195
e163cc97
LW
196static int mii_cnt;
197
ff43da86
FL
198static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
199{
200 struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
201 if (is_ex)
202 return (struct bufdesc *)(ex + 1);
203 else
204 return bdp + 1;
205}
206
207static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
208{
209 struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
210 if (is_ex)
211 return (struct bufdesc *)(ex - 1);
212 else
213 return bdp - 1;
214}
215
b5680e0b
SG
/* Byte-swap a frame buffer in place, one 32-bit word at a time, rounding
 * the length up to a whole word.  Used on controllers with the
 * FEC_QUIRK_SWAP_FRAME erratum.  Returns bufaddr for caller convenience.
 */
static void *swap_buffer(void *bufaddr, int len)
{
	unsigned int *word = bufaddr;
	int nwords = (len + 3) / 4;

	while (nwords-- > 0) {
		*word = cpu_to_be32(*word);
		word++;
	}

	return bufaddr;
}
226
/* Queue one skb on the next free Tx buffer descriptor.
 * Returns NETDEV_TX_BUSY when the link is down or the ring is full,
 * NETDEV_TX_OK once the descriptor has been handed to the hardware.
 * Serialized against the Tx-completion path by fep->hw_lock.
 */
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", ndev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		/* bounce-buffer index must match the descriptor index,
		 * so compute it with the correct descriptor stride */
		if (fep->bufdesc_ex)
			index = (struct bufdesc_ex *)bdp -
				(struct bufdesc_ex *)fep->tx_bd_base;
		else
			index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some design made an incorrect assumption on endian mode of
	 * the system that it's running on. As the result, driver has to
	 * swap every frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	ndev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
		ebdp->cbd_bdu = 0;
		/* request a hardware Tx timestamp when the skb asks for
		 * one and timestamping is enabled */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en)) {
			ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		} else {

			ebdp->cbd_esc = BD_ENET_TX_INT;
		}
	}
	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);

	/* ring is full once the producer catches up with dirty_tx */
	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(ndev);
	}

	fep->cur_tx = bdp;

	skb_tx_timestamp(skb);

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
343
45993653
UKK
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 *
 * Resets the controller, reprograms MAC address, rings, duplex mode,
 * PHY-interface-specific RCR/ECR bits and the interrupt mask, then
 * re-enables Rx/Tx.  All queued Tx skbs are freed, so in-flight
 * transmits are lost across a restart.
 */
static void
fec_restart(struct net_device *ndev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this.
	 * NOTE(review): a fixed udelay(10) stands in for polling the
	 * reset bit — confirm against the controller datasheet. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast.	*/
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base.  The Tx ring starts
	 * right after RX_RING_SIZE descriptors, so the offset depends on
	 * whether extended descriptors are in use. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	if (fep->bufdesc_ex)
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
	else
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}
	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	/* enable enhanced (extended descriptor) mode */
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
495
/* Stop the controller: attempt a graceful transmit stop (only possible
 * with link up), then hard-reset the MAC.  The RMII mode bit and MII
 * speed are preserved/restored across the reset, and on ENET-MAC parts
 * the MAC is re-enabled so the MII (MDIO-complete) interrupt keeps
 * working while the interface is down.
 */
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	/* latch RMII mode bit before the reset clears FEC_R_CNTRL */
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}
524
525
45993653
UKK
/* ndo_tx_timeout handler: count the error, restart the controller with
 * the current duplex setting, and wake the Tx queue so the stack can
 * resubmit.
 */
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	ndev->stats.tx_errors++;

	fec_restart(ndev, fep->full_duplex);
	netif_wake_queue(ndev);
}
536
/* Tx-completion handler, called from the interrupt handler on TXF.
 * Walks the ring from dirty_tx, reclaiming every descriptor the
 * hardware has released (TX_READY clear): updates error/packet stats,
 * reports a hardware Tx timestamp if one was requested, unmaps the DMA
 * buffer, frees the skb, and wakes the queue if the ring was full.
 * Runs under fep->hw_lock to serialize against fec_enet_start_xmit().
 */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		/* dirty_tx caught up with cur_tx and the ring is not
		 * full: nothing left to reclaim */
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		/* deliver the hardware Tx timestamp requested at xmit time */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, ebdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
622
623
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 *
 * Called from the interrupt handler on RXF, under fep->hw_lock.
 * Each received frame is copied into a freshly allocated skb and the
 * original DMA buffer is remapped and handed back to the hardware.
 */
static void
fec_enet_rx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		/* NOTE(review): the Rx path maps/unmaps with
		 * FEC_ENET_TX_FRSIZE — verify this matches the Rx buffer
		 * size used when the ring was populated. */
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				netif_rx(skb);
		}

		/* hand the buffer back to the hardware */
		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}
771
45993653
UKK
/* Interrupt handler.  Reads and acknowledges pending events from
 * FEC_IEVENT, then dispatches: RXF -> fec_enet_rx(), TXF -> fec_enet_tx(),
 * MII -> complete the pending MDIO transaction.  Loops until no events
 * remain pending.  Returns IRQ_HANDLED if any known event was serviced,
 * IRQ_NONE otherwise (shared-IRQ safe).
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		/* ack everything we read so new events can latch */
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(ndev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
806
807
808
e6b043d5 809/* ------------------------------------------------------------------------- */
/* Determine the device MAC address, trying sources in priority order
 * (module parameter, device tree, platform data/flash, MAC registers
 * programmed by the bootloader) and copy the winner into
 * ndev->dev_addr.  When the module-parameter address is used, the last
 * byte is offset by the device id so multiple FECs get distinct
 * addresses.
 */
static void __inline__ fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

#ifdef CONFIG_OF
	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}
#endif

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((unsigned long *) &tmpaddr[0]) =
			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
		*((unsigned short *) &tmpaddr[4]) =
			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
868
e6b043d5 869/* ------------------------------------------------------------------------- */
1da177e4 870
e6b043d5
BW
871/*
872 * Phy section
873 */
/* phylib link-change callback.  Restarts the MAC when the duplex
 * setting changes or the link comes up, stops it when the link goes
 * down, and prints the new PHY status when anything changed.  Protected
 * by fep->hw_lock.
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}

	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(ndev, phy_dev->duplex);
			/* prevent unnecessary second fec_restart() below */
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(ndev, phy_dev->duplex);
		else
			fec_stop(ndev);
		status_change = 1;
	}

spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}
1da177e4 916
/* MDIO bus read: start a clause-22 read frame on the MII management
 * interface and wait for the MII-done interrupt to complete it.
 * Returns the 16-bit register value, or -ETIMEDOUT if the transfer did
 * not complete within FEC_MII_TIMEOUT microseconds.
 */
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer; completed from the MII interrupt */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
6aa20a22 942
e6b043d5
BW
943static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
944 u16 value)
1da177e4 945{
e6b043d5 946 struct fec_enet_private *fep = bus->priv;
97b72e43 947 unsigned long time_left;
1da177e4 948
e6b043d5 949 fep->mii_timeout = 0;
97b72e43 950 init_completion(&fep->mdio_done);
1da177e4 951
862f0982
SG
952 /* start a write op */
953 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
e6b043d5
BW
954 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
955 FEC_MMFR_TA | FEC_MMFR_DATA(value),
956 fep->hwp + FEC_MII_DATA);
957
958 /* wait for end of transfer */
97b72e43
BS
959 time_left = wait_for_completion_timeout(&fep->mdio_done,
960 usecs_to_jiffies(FEC_MII_TIMEOUT));
961 if (time_left == 0) {
962 fep->mii_timeout = 1;
963 printk(KERN_ERR "FEC: MDIO write timeout\n");
964 return -ETIMEDOUT;
e6b043d5 965 }
1da177e4 966
e6b043d5
BW
967 return 0;
968}
1da177e4 969
/* MDIO bus reset hook: the FEC needs no bus-level reset, so do nothing. */
static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
974
c556167f 975static int fec_enet_mii_probe(struct net_device *ndev)
562d2f8c 976{
c556167f 977 struct fec_enet_private *fep = netdev_priv(ndev);
230dec61
SG
978 const struct platform_device_id *id_entry =
979 platform_get_device_id(fep->pdev);
e6b043d5 980 struct phy_device *phy_dev = NULL;
6fcc040f
GU
981 char mdio_bus_id[MII_BUS_ID_SIZE];
982 char phy_name[MII_BUS_ID_SIZE + 3];
983 int phy_id;
43af940c 984 int dev_id = fep->dev_id;
562d2f8c 985
418bd0d4
BW
986 fep->phy_dev = NULL;
987
6fcc040f
GU
988 /* check for attached phy */
989 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
990 if ((fep->mii_bus->phy_mask & (1 << phy_id)))
991 continue;
992 if (fep->mii_bus->phy_map[phy_id] == NULL)
993 continue;
994 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
995 continue;
b5680e0b
SG
996 if (dev_id--)
997 continue;
6fcc040f
GU
998 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
999 break;
e6b043d5 1000 }
1da177e4 1001
6fcc040f 1002 if (phy_id >= PHY_MAX_ADDR) {
a7dd3219
LW
1003 printk(KERN_INFO
1004 "%s: no PHY, assuming direct connection to switch\n",
1005 ndev->name);
ea51ade9 1006 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
6fcc040f
GU
1007 phy_id = 0;
1008 }
1009
a7ed07d5 1010 snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
c556167f 1011 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
230dec61 1012 fep->phy_interface);
6fcc040f 1013 if (IS_ERR(phy_dev)) {
c556167f 1014 printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
6fcc040f 1015 return PTR_ERR(phy_dev);
e6b043d5 1016 }
1da177e4 1017
e6b043d5 1018 /* mask with MAC supported features */
230dec61
SG
1019 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
1020 phy_dev->supported &= PHY_GBIT_FEATURES;
1021 else
1022 phy_dev->supported &= PHY_BASIC_FEATURES;
1023
e6b043d5 1024 phy_dev->advertising = phy_dev->supported;
1da177e4 1025
e6b043d5
BW
1026 fep->phy_dev = phy_dev;
1027 fep->link = 0;
1028 fep->full_duplex = 0;
1da177e4 1029
a7dd3219
LW
1030 printk(KERN_INFO
1031 "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1032 ndev->name,
418bd0d4
BW
1033 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1034 fep->phy_dev->irq);
1035
e6b043d5 1036 return 0;
1da177e4
LT
1037}
1038
e6b043d5 1039static int fec_enet_mii_init(struct platform_device *pdev)
562d2f8c 1040{
b5680e0b 1041 static struct mii_bus *fec0_mii_bus;
c556167f
UKK
1042 struct net_device *ndev = platform_get_drvdata(pdev);
1043 struct fec_enet_private *fep = netdev_priv(ndev);
b5680e0b
SG
1044 const struct platform_device_id *id_entry =
1045 platform_get_device_id(fep->pdev);
e6b043d5 1046 int err = -ENXIO, i;
6b265293 1047
b5680e0b
SG
1048 /*
1049 * The dual fec interfaces are not equivalent with enet-mac.
1050 * Here are the differences:
1051 *
1052 * - fec0 supports MII & RMII modes while fec1 only supports RMII
1053 * - fec0 acts as the 1588 time master while fec1 is slave
1054 * - external phys can only be configured by fec0
1055 *
1056 * That is to say fec1 can not work independently. It only works
1057 * when fec0 is working. The reason behind this design is that the
1058 * second interface is added primarily for Switch mode.
1059 *
1060 * Because of the last point above, both phys are attached on fec0
1061 * mdio interface in board design, and need to be configured by
1062 * fec0 mii_bus.
1063 */
43af940c 1064 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
b5680e0b 1065 /* fec1 uses fec0 mii_bus */
e163cc97
LW
1066 if (mii_cnt && fec0_mii_bus) {
1067 fep->mii_bus = fec0_mii_bus;
1068 mii_cnt++;
1069 return 0;
1070 }
1071 return -ENOENT;
b5680e0b
SG
1072 }
1073
e6b043d5 1074 fep->mii_timeout = 0;
1da177e4 1075
e6b043d5
BW
1076 /*
1077 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
230dec61
SG
1078 *
1079 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
1080 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
1081 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1082 * document.
e6b043d5 1083 */
f4d40de3 1084 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
230dec61
SG
1085 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1086 fep->phy_speed--;
1087 fep->phy_speed <<= 1;
e6b043d5 1088 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1da177e4 1089
e6b043d5
BW
1090 fep->mii_bus = mdiobus_alloc();
1091 if (fep->mii_bus == NULL) {
1092 err = -ENOMEM;
1093 goto err_out;
1da177e4
LT
1094 }
1095
e6b043d5
BW
1096 fep->mii_bus->name = "fec_enet_mii_bus";
1097 fep->mii_bus->read = fec_enet_mdio_read;
1098 fep->mii_bus->write = fec_enet_mdio_write;
1099 fep->mii_bus->reset = fec_enet_mdio_reset;
391420f7
FF
1100 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1101 pdev->name, fep->dev_id + 1);
e6b043d5
BW
1102 fep->mii_bus->priv = fep;
1103 fep->mii_bus->parent = &pdev->dev;
1104
1105 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1106 if (!fep->mii_bus->irq) {
1107 err = -ENOMEM;
1108 goto err_out_free_mdiobus;
1da177e4
LT
1109 }
1110
e6b043d5
BW
1111 for (i = 0; i < PHY_MAX_ADDR; i++)
1112 fep->mii_bus->irq[i] = PHY_POLL;
1da177e4 1113
e6b043d5
BW
1114 if (mdiobus_register(fep->mii_bus))
1115 goto err_out_free_mdio_irq;
1da177e4 1116
e163cc97
LW
1117 mii_cnt++;
1118
b5680e0b
SG
1119 /* save fec0 mii_bus */
1120 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1121 fec0_mii_bus = fep->mii_bus;
1122
e6b043d5 1123 return 0;
1da177e4 1124
e6b043d5
BW
1125err_out_free_mdio_irq:
1126 kfree(fep->mii_bus->irq);
1127err_out_free_mdiobus:
1128 mdiobus_free(fep->mii_bus);
1129err_out:
1130 return err;
1da177e4
LT
1131}
1132
e6b043d5 1133static void fec_enet_mii_remove(struct fec_enet_private *fep)
1da177e4 1134{
e163cc97
LW
1135 if (--mii_cnt == 0) {
1136 mdiobus_unregister(fep->mii_bus);
1137 kfree(fep->mii_bus->irq);
1138 mdiobus_free(fep->mii_bus);
1139 }
1da177e4
LT
1140}
1141
c556167f 1142static int fec_enet_get_settings(struct net_device *ndev,
e6b043d5 1143 struct ethtool_cmd *cmd)
1da177e4 1144{
c556167f 1145 struct fec_enet_private *fep = netdev_priv(ndev);
e6b043d5 1146 struct phy_device *phydev = fep->phy_dev;
1da177e4 1147
e6b043d5
BW
1148 if (!phydev)
1149 return -ENODEV;
1da177e4 1150
e6b043d5 1151 return phy_ethtool_gset(phydev, cmd);
1da177e4
LT
1152}
1153
c556167f 1154static int fec_enet_set_settings(struct net_device *ndev,
e6b043d5 1155 struct ethtool_cmd *cmd)
1da177e4 1156{
c556167f 1157 struct fec_enet_private *fep = netdev_priv(ndev);
e6b043d5 1158 struct phy_device *phydev = fep->phy_dev;
1da177e4 1159
e6b043d5
BW
1160 if (!phydev)
1161 return -ENODEV;
1da177e4 1162
e6b043d5 1163 return phy_ethtool_sset(phydev, cmd);
1da177e4
LT
1164}
1165
c556167f 1166static void fec_enet_get_drvinfo(struct net_device *ndev,
e6b043d5 1167 struct ethtool_drvinfo *info)
1da177e4 1168{
c556167f 1169 struct fec_enet_private *fep = netdev_priv(ndev);
6aa20a22 1170
e6b043d5
BW
1171 strcpy(info->driver, fep->pdev->dev.driver->name);
1172 strcpy(info->version, "Revision: 1.0");
c556167f 1173 strcpy(info->bus_info, dev_name(&ndev->dev));
1da177e4
LT
1174}
1175
9b07be4b 1176static const struct ethtool_ops fec_enet_ethtool_ops = {
e6b043d5
BW
1177 .get_settings = fec_enet_get_settings,
1178 .set_settings = fec_enet_set_settings,
1179 .get_drvinfo = fec_enet_get_drvinfo,
1180 .get_link = ethtool_op_get_link,
ec567bca 1181 .get_ts_info = ethtool_op_get_ts_info,
e6b043d5 1182};
1da177e4 1183
c556167f 1184static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1da177e4 1185{
c556167f 1186 struct fec_enet_private *fep = netdev_priv(ndev);
e6b043d5 1187 struct phy_device *phydev = fep->phy_dev;
1da177e4 1188
c556167f 1189 if (!netif_running(ndev))
e6b043d5 1190 return -EINVAL;
1da177e4 1191
e6b043d5
BW
1192 if (!phydev)
1193 return -ENODEV;
1194
ff43da86 1195 if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
6605b730 1196 return fec_ptp_ioctl(ndev, rq, cmd);
ff43da86 1197
28b04113 1198 return phy_mii_ioctl(phydev, rq, cmd);
1da177e4
LT
1199}
1200
c556167f 1201static void fec_enet_free_buffers(struct net_device *ndev)
f0b3fbea 1202{
c556167f 1203 struct fec_enet_private *fep = netdev_priv(ndev);
f0b3fbea
SH
1204 int i;
1205 struct sk_buff *skb;
1206 struct bufdesc *bdp;
1207
1208 bdp = fep->rx_bd_base;
1209 for (i = 0; i < RX_RING_SIZE; i++) {
1210 skb = fep->rx_skbuff[i];
1211
1212 if (bdp->cbd_bufaddr)
d1ab1f54 1213 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
f0b3fbea
SH
1214 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1215 if (skb)
1216 dev_kfree_skb(skb);
ff43da86 1217 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
f0b3fbea
SH
1218 }
1219
1220 bdp = fep->tx_bd_base;
1221 for (i = 0; i < TX_RING_SIZE; i++)
1222 kfree(fep->tx_bounce[i]);
1223}
1224
c556167f 1225static int fec_enet_alloc_buffers(struct net_device *ndev)
f0b3fbea 1226{
c556167f 1227 struct fec_enet_private *fep = netdev_priv(ndev);
f0b3fbea
SH
1228 int i;
1229 struct sk_buff *skb;
1230 struct bufdesc *bdp;
1231
1232 bdp = fep->rx_bd_base;
1233 for (i = 0; i < RX_RING_SIZE; i++) {
b72061a3 1234 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
f0b3fbea 1235 if (!skb) {
c556167f 1236 fec_enet_free_buffers(ndev);
f0b3fbea
SH
1237 return -ENOMEM;
1238 }
1239 fep->rx_skbuff[i] = skb;
1240
d1ab1f54 1241 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
f0b3fbea
SH
1242 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1243 bdp->cbd_sc = BD_ENET_RX_EMPTY;
ff43da86
FL
1244
1245 if (fep->bufdesc_ex) {
1246 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1247 ebdp->cbd_esc = BD_ENET_RX_INT;
1248 }
1249
1250 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
f0b3fbea
SH
1251 }
1252
1253 /* Set the last buffer to wrap. */
ff43da86 1254 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
f0b3fbea
SH
1255 bdp->cbd_sc |= BD_SC_WRAP;
1256
1257 bdp = fep->tx_bd_base;
1258 for (i = 0; i < TX_RING_SIZE; i++) {
1259 fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
1260
1261 bdp->cbd_sc = 0;
1262 bdp->cbd_bufaddr = 0;
6605b730 1263
ff43da86
FL
1264 if (fep->bufdesc_ex) {
1265 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1266 ebdp->cbd_esc = BD_ENET_RX_INT;
1267 }
1268
1269 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
f0b3fbea
SH
1270 }
1271
1272 /* Set the last buffer to wrap. */
ff43da86 1273 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
f0b3fbea
SH
1274 bdp->cbd_sc |= BD_SC_WRAP;
1275
1276 return 0;
1277}
1278
1da177e4 1279static int
c556167f 1280fec_enet_open(struct net_device *ndev)
1da177e4 1281{
c556167f 1282 struct fec_enet_private *fep = netdev_priv(ndev);
f0b3fbea 1283 int ret;
1da177e4
LT
1284
1285 /* I should reset the ring buffers here, but I don't yet know
1286 * a simple way to do that.
1287 */
1da177e4 1288
c556167f 1289 ret = fec_enet_alloc_buffers(ndev);
f0b3fbea
SH
1290 if (ret)
1291 return ret;
1292
418bd0d4 1293 /* Probe and connect to PHY when open the interface */
c556167f 1294 ret = fec_enet_mii_probe(ndev);
418bd0d4 1295 if (ret) {
c556167f 1296 fec_enet_free_buffers(ndev);
418bd0d4
BW
1297 return ret;
1298 }
e6b043d5 1299 phy_start(fep->phy_dev);
c556167f 1300 netif_start_queue(ndev);
1da177e4 1301 fep->opened = 1;
22f6b860 1302 return 0;
1da177e4
LT
1303}
1304
1305static int
c556167f 1306fec_enet_close(struct net_device *ndev)
1da177e4 1307{
c556167f 1308 struct fec_enet_private *fep = netdev_priv(ndev);
1da177e4 1309
22f6b860 1310 /* Don't know what to do yet. */
1da177e4 1311 fep->opened = 0;
c556167f
UKK
1312 netif_stop_queue(ndev);
1313 fec_stop(ndev);
1da177e4 1314
e497ba82
UKK
1315 if (fep->phy_dev) {
1316 phy_stop(fep->phy_dev);
418bd0d4 1317 phy_disconnect(fep->phy_dev);
e497ba82 1318 }
418bd0d4 1319
db8880bc 1320 fec_enet_free_buffers(ndev);
f0b3fbea 1321
1da177e4
LT
1322 return 0;
1323}
1324
1da177e4
LT
1325/* Set or clear the multicast filter for this adaptor.
1326 * Skeleton taken from sunlance driver.
1327 * The CPM Ethernet implementation allows Multicast as well as individual
1328 * MAC address filtering. Some of the drivers check to make sure it is
1329 * a group multicast address, and discard those that are not. I guess I
1330 * will do the same for now, but just remove the test if you want
1331 * individual filtering as well (do the upper net layers want or support
1332 * this kind of feature?).
1333 */
1334
1335#define HASH_BITS 6 /* #bits in hash */
1336#define CRC32_POLY 0xEDB88320
1337
c556167f 1338static void set_multicast_list(struct net_device *ndev)
1da177e4 1339{
c556167f 1340 struct fec_enet_private *fep = netdev_priv(ndev);
22bedad3 1341 struct netdev_hw_addr *ha;
48e2f183 1342 unsigned int i, bit, data, crc, tmp;
1da177e4
LT
1343 unsigned char hash;
1344
c556167f 1345 if (ndev->flags & IFF_PROMISC) {
f44d6305
SH
1346 tmp = readl(fep->hwp + FEC_R_CNTRL);
1347 tmp |= 0x8;
1348 writel(tmp, fep->hwp + FEC_R_CNTRL);
4e831836
SH
1349 return;
1350 }
1da177e4 1351
4e831836
SH
1352 tmp = readl(fep->hwp + FEC_R_CNTRL);
1353 tmp &= ~0x8;
1354 writel(tmp, fep->hwp + FEC_R_CNTRL);
1355
c556167f 1356 if (ndev->flags & IFF_ALLMULTI) {
4e831836
SH
1357 /* Catch all multicast addresses, so set the
1358 * filter to all 1's
1359 */
1360 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1361 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1362
1363 return;
1364 }
1365
1366 /* Clear filter and add the addresses in hash register
1367 */
1368 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1369 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1370
c556167f 1371 netdev_for_each_mc_addr(ha, ndev) {
4e831836
SH
1372 /* calculate crc32 value of mac address */
1373 crc = 0xffffffff;
1374
c556167f 1375 for (i = 0; i < ndev->addr_len; i++) {
22bedad3 1376 data = ha->addr[i];
4e831836
SH
1377 for (bit = 0; bit < 8; bit++, data >>= 1) {
1378 crc = (crc >> 1) ^
1379 (((crc ^ data) & 1) ? CRC32_POLY : 0);
1da177e4
LT
1380 }
1381 }
4e831836
SH
1382
1383 /* only upper 6 bits (HASH_BITS) are used
1384 * which point to specific bit in he hash registers
1385 */
1386 hash = (crc >> (32 - HASH_BITS)) & 0x3f;
1387
1388 if (hash > 31) {
1389 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1390 tmp |= 1 << (hash - 32);
1391 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1392 } else {
1393 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1394 tmp |= 1 << hash;
1395 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1396 }
1da177e4
LT
1397 }
1398}
1399
22f6b860 1400/* Set a MAC change in hardware. */
009fda83 1401static int
c556167f 1402fec_set_mac_address(struct net_device *ndev, void *p)
1da177e4 1403{
c556167f 1404 struct fec_enet_private *fep = netdev_priv(ndev);
009fda83
SH
1405 struct sockaddr *addr = p;
1406
1407 if (!is_valid_ether_addr(addr->sa_data))
1408 return -EADDRNOTAVAIL;
1409
c556167f 1410 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1da177e4 1411
c556167f
UKK
1412 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1413 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
f44d6305 1414 fep->hwp + FEC_ADDR_LOW);
c556167f 1415 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
7cff0943 1416 fep->hwp + FEC_ADDR_HIGH);
009fda83 1417 return 0;
1da177e4
LT
1418}
1419
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 */
/* Fix: declared static — this function is only referenced through
 * fec_netdev_ops below; without static it needlessly exports a global
 * symbol and trips -Wmissing-prototypes.
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	/* run the ISR by hand for each wired IRQ, with that IRQ masked */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
1442
009fda83
SH
1443static const struct net_device_ops fec_netdev_ops = {
1444 .ndo_open = fec_enet_open,
1445 .ndo_stop = fec_enet_close,
1446 .ndo_start_xmit = fec_enet_start_xmit,
afc4b13d 1447 .ndo_set_rx_mode = set_multicast_list,
635ecaa7 1448 .ndo_change_mtu = eth_change_mtu,
009fda83
SH
1449 .ndo_validate_addr = eth_validate_addr,
1450 .ndo_tx_timeout = fec_timeout,
1451 .ndo_set_mac_address = fec_set_mac_address,
db8880bc 1452 .ndo_do_ioctl = fec_enet_ioctl,
7f5c6add
XJ
1453#ifdef CONFIG_NET_POLL_CONTROLLER
1454 .ndo_poll_controller = fec_poll_controller,
1455#endif
009fda83
SH
1456};
1457
1da177e4
LT
1458 /*
1459 * XXX: We need to clean up on failure exits here.
ead73183 1460 *
1da177e4 1461 */
c556167f 1462static int fec_enet_init(struct net_device *ndev)
1da177e4 1463{
c556167f 1464 struct fec_enet_private *fep = netdev_priv(ndev);
f0b3fbea 1465 struct bufdesc *cbd_base;
633e7533 1466 struct bufdesc *bdp;
f0b3fbea 1467 int i;
1da177e4 1468
8d4dd5cf
SH
1469 /* Allocate memory for buffer descriptors. */
1470 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1471 GFP_KERNEL);
1472 if (!cbd_base) {
562d2f8c
GU
1473 printk("FEC: allocate descriptor memory failed?\n");
1474 return -ENOMEM;
1475 }
1476
3b2b74ca 1477 spin_lock_init(&fep->hw_lock);
3b2b74ca 1478
c556167f 1479 fep->netdev = ndev;
1da177e4 1480
49da97dc 1481 /* Get the Ethernet address */
c556167f 1482 fec_get_mac(ndev);
1da177e4 1483
8d4dd5cf 1484 /* Set receive and transmit descriptor base. */
1da177e4 1485 fep->rx_bd_base = cbd_base;
ff43da86
FL
1486 if (fep->bufdesc_ex)
1487 fep->tx_bd_base = (struct bufdesc *)
1488 (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
1489 else
1490 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1da177e4 1491
22f6b860 1492 /* The FEC Ethernet specific entries in the device structure */
c556167f
UKK
1493 ndev->watchdog_timeo = TX_TIMEOUT;
1494 ndev->netdev_ops = &fec_netdev_ops;
1495 ndev->ethtool_ops = &fec_enet_ethtool_ops;
633e7533
RH
1496
1497 /* Initialize the receive buffer descriptors. */
1498 bdp = fep->rx_bd_base;
1499 for (i = 0; i < RX_RING_SIZE; i++) {
1500
1501 /* Initialize the BD for every fragment in the page. */
1502 bdp->cbd_sc = 0;
ff43da86 1503 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
633e7533
RH
1504 }
1505
1506 /* Set the last buffer to wrap */
ff43da86 1507 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
633e7533
RH
1508 bdp->cbd_sc |= BD_SC_WRAP;
1509
1510 /* ...and the same for transmit */
1511 bdp = fep->tx_bd_base;
1512 for (i = 0; i < TX_RING_SIZE; i++) {
1513
1514 /* Initialize the BD for every fragment in the page. */
1515 bdp->cbd_sc = 0;
1516 bdp->cbd_bufaddr = 0;
ff43da86 1517 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
633e7533
RH
1518 }
1519
1520 /* Set the last buffer to wrap */
ff43da86 1521 bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
633e7533
RH
1522 bdp->cbd_sc |= BD_SC_WRAP;
1523
c556167f 1524 fec_restart(ndev, 0);
1da177e4 1525
1da177e4
LT
1526 return 0;
1527}
1528
ca2cc333 1529#ifdef CONFIG_OF
33897cc8 1530static int fec_get_phy_mode_dt(struct platform_device *pdev)
ca2cc333
SG
1531{
1532 struct device_node *np = pdev->dev.of_node;
1533
1534 if (np)
1535 return of_get_phy_mode(np);
1536
1537 return -ENODEV;
1538}
1539
33897cc8 1540static void fec_reset_phy(struct platform_device *pdev)
ca2cc333
SG
1541{
1542 int err, phy_reset;
a3caad0a 1543 int msec = 1;
ca2cc333
SG
1544 struct device_node *np = pdev->dev.of_node;
1545
1546 if (!np)
a9b2c8ef 1547 return;
ca2cc333 1548
a3caad0a
SG
1549 of_property_read_u32(np, "phy-reset-duration", &msec);
1550 /* A sane reset duration should not be longer than 1s */
1551 if (msec > 1000)
1552 msec = 1;
1553
ca2cc333 1554 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
119fc007
SG
1555 err = devm_gpio_request_one(&pdev->dev, phy_reset,
1556 GPIOF_OUT_INIT_LOW, "phy-reset");
ca2cc333 1557 if (err) {
a9b2c8ef
SG
1558 pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
1559 return;
ca2cc333 1560 }
a3caad0a 1561 msleep(msec);
ca2cc333 1562 gpio_set_value(phy_reset, 1);
ca2cc333
SG
1563}
1564#else /* CONFIG_OF */
1565static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
1566{
1567 return -ENODEV;
1568}
1569
a9b2c8ef 1570static inline void fec_reset_phy(struct platform_device *pdev)
ca2cc333
SG
1571{
1572 /*
1573 * In case of platform probe, the reset has been done
1574 * by machine code.
1575 */
ca2cc333
SG
1576}
1577#endif /* CONFIG_OF */
1578
33897cc8 1579static int
ead73183
SH
1580fec_probe(struct platform_device *pdev)
1581{
1582 struct fec_enet_private *fep;
5eb32bd0 1583 struct fec_platform_data *pdata;
ead73183
SH
1584 struct net_device *ndev;
1585 int i, irq, ret = 0;
1586 struct resource *r;
ca2cc333 1587 const struct of_device_id *of_id;
43af940c 1588 static int dev_id;
b2bccee1 1589 struct pinctrl *pinctrl;
5fa9c0fe 1590 struct regulator *reg_phy;
ca2cc333
SG
1591
1592 of_id = of_match_device(fec_dt_ids, &pdev->dev);
1593 if (of_id)
1594 pdev->id_entry = of_id->data;
ead73183
SH
1595
1596 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1597 if (!r)
1598 return -ENXIO;
1599
1600 r = request_mem_region(r->start, resource_size(r), pdev->name);
1601 if (!r)
1602 return -EBUSY;
1603
1604 /* Init network device */
1605 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
28e2188e
UKK
1606 if (!ndev) {
1607 ret = -ENOMEM;
1608 goto failed_alloc_etherdev;
1609 }
ead73183
SH
1610
1611 SET_NETDEV_DEV(ndev, &pdev->dev);
1612
1613 /* setup board info structure */
1614 fep = netdev_priv(ndev);
ead73183 1615
24e531b4 1616 fep->hwp = ioremap(r->start, resource_size(r));
e6b043d5 1617 fep->pdev = pdev;
43af940c 1618 fep->dev_id = dev_id++;
ead73183 1619
ff43da86
FL
1620 fep->bufdesc_ex = 0;
1621
24e531b4 1622 if (!fep->hwp) {
ead73183
SH
1623 ret = -ENOMEM;
1624 goto failed_ioremap;
1625 }
1626
1627 platform_set_drvdata(pdev, ndev);
1628
ca2cc333
SG
1629 ret = fec_get_phy_mode_dt(pdev);
1630 if (ret < 0) {
1631 pdata = pdev->dev.platform_data;
1632 if (pdata)
1633 fep->phy_interface = pdata->phy;
1634 else
1635 fep->phy_interface = PHY_INTERFACE_MODE_MII;
1636 } else {
1637 fep->phy_interface = ret;
1638 }
1639
c7c83d1c 1640 for (i = 0; i < FEC_IRQ_NUM; i++) {
ead73183 1641 irq = platform_get_irq(pdev, i);
86f9f2c8
LW
1642 if (irq < 0) {
1643 if (i)
1644 break;
1645 ret = irq;
1646 goto failed_irq;
1647 }
ead73183
SH
1648 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1649 if (ret) {
b2b09ad6 1650 while (--i >= 0) {
ead73183
SH
1651 irq = platform_get_irq(pdev, i);
1652 free_irq(irq, ndev);
ead73183
SH
1653 }
1654 goto failed_irq;
1655 }
1656 }
1657
b2bccee1
SG
1658 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1659 if (IS_ERR(pinctrl)) {
1660 ret = PTR_ERR(pinctrl);
1661 goto failed_pin;
1662 }
1663
f4d40de3
SH
1664 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1665 if (IS_ERR(fep->clk_ipg)) {
1666 ret = PTR_ERR(fep->clk_ipg);
ead73183
SH
1667 goto failed_clk;
1668 }
f4d40de3
SH
1669
1670 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1671 if (IS_ERR(fep->clk_ahb)) {
1672 ret = PTR_ERR(fep->clk_ahb);
1673 goto failed_clk;
1674 }
1675
6605b730 1676 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
ff43da86
FL
1677 fep->bufdesc_ex =
1678 pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
6605b730
FL
1679 if (IS_ERR(fep->clk_ptp)) {
1680 ret = PTR_ERR(fep->clk_ptp);
ff43da86 1681 fep->bufdesc_ex = 0;
6605b730 1682 }
6605b730 1683
f4d40de3
SH
1684 clk_prepare_enable(fep->clk_ahb);
1685 clk_prepare_enable(fep->clk_ipg);
ff43da86
FL
1686 if (!IS_ERR(fep->clk_ptp))
1687 clk_prepare_enable(fep->clk_ptp);
1688
5fa9c0fe
SG
1689 reg_phy = devm_regulator_get(&pdev->dev, "phy");
1690 if (!IS_ERR(reg_phy)) {
1691 ret = regulator_enable(reg_phy);
1692 if (ret) {
1693 dev_err(&pdev->dev,
1694 "Failed to enable phy regulator: %d\n", ret);
1695 goto failed_regulator;
1696 }
1697 }
1698
2ca9b2aa
SG
1699 fec_reset_phy(pdev);
1700
8649a230 1701 ret = fec_enet_init(ndev);
ead73183
SH
1702 if (ret)
1703 goto failed_init;
1704
e6b043d5
BW
1705 ret = fec_enet_mii_init(pdev);
1706 if (ret)
1707 goto failed_mii_init;
1708
03c698c9
OS
1709 /* Carrier starts down, phylib will bring it up */
1710 netif_carrier_off(ndev);
1711
ead73183
SH
1712 ret = register_netdev(ndev);
1713 if (ret)
1714 goto failed_register;
1715
ff43da86
FL
1716 if (fep->bufdesc_ex)
1717 fec_ptp_init(ndev, pdev);
6605b730 1718
ead73183
SH
1719 return 0;
1720
1721failed_register:
e6b043d5
BW
1722 fec_enet_mii_remove(fep);
1723failed_mii_init:
ead73183 1724failed_init:
5fa9c0fe 1725failed_regulator:
f4d40de3
SH
1726 clk_disable_unprepare(fep->clk_ahb);
1727 clk_disable_unprepare(fep->clk_ipg);
ff43da86
FL
1728 if (!IS_ERR(fep->clk_ptp))
1729 clk_disable_unprepare(fep->clk_ptp);
b2bccee1 1730failed_pin:
ead73183 1731failed_clk:
c7c83d1c 1732 for (i = 0; i < FEC_IRQ_NUM; i++) {
ead73183
SH
1733 irq = platform_get_irq(pdev, i);
1734 if (irq > 0)
1735 free_irq(irq, ndev);
1736 }
1737failed_irq:
24e531b4 1738 iounmap(fep->hwp);
ead73183
SH
1739failed_ioremap:
1740 free_netdev(ndev);
28e2188e
UKK
1741failed_alloc_etherdev:
1742 release_mem_region(r->start, resource_size(r));
ead73183
SH
1743
1744 return ret;
1745}
1746
33897cc8 1747static int
ead73183
SH
1748fec_drv_remove(struct platform_device *pdev)
1749{
1750 struct net_device *ndev = platform_get_drvdata(pdev);
1751 struct fec_enet_private *fep = netdev_priv(ndev);
28e2188e 1752 struct resource *r;
e163cc97 1753 int i;
ead73183 1754
e163cc97 1755 unregister_netdev(ndev);
e6b043d5 1756 fec_enet_mii_remove(fep);
e163cc97
LW
1757 for (i = 0; i < FEC_IRQ_NUM; i++) {
1758 int irq = platform_get_irq(pdev, i);
1759 if (irq > 0)
1760 free_irq(irq, ndev);
1761 }
6605b730
FL
1762 del_timer_sync(&fep->time_keep);
1763 clk_disable_unprepare(fep->clk_ptp);
1764 if (fep->ptp_clock)
1765 ptp_clock_unregister(fep->ptp_clock);
f4d40de3
SH
1766 clk_disable_unprepare(fep->clk_ahb);
1767 clk_disable_unprepare(fep->clk_ipg);
24e531b4 1768 iounmap(fep->hwp);
ead73183 1769 free_netdev(ndev);
28e2188e
UKK
1770
1771 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1772 BUG_ON(!r);
1773 release_mem_region(r->start, resource_size(r));
1774
b3cde36c
UKK
1775 platform_set_drvdata(pdev, NULL);
1776
ead73183
SH
1777 return 0;
1778}
1779
59d4289b 1780#ifdef CONFIG_PM
ead73183 1781static int
87cad5c3 1782fec_suspend(struct device *dev)
ead73183 1783{
87cad5c3 1784 struct net_device *ndev = dev_get_drvdata(dev);
04e5216d 1785 struct fec_enet_private *fep = netdev_priv(ndev);
ead73183 1786
04e5216d
UKK
1787 if (netif_running(ndev)) {
1788 fec_stop(ndev);
1789 netif_device_detach(ndev);
ead73183 1790 }
f4d40de3
SH
1791 clk_disable_unprepare(fep->clk_ahb);
1792 clk_disable_unprepare(fep->clk_ipg);
04e5216d 1793
ead73183
SH
1794 return 0;
1795}
1796
1797static int
87cad5c3 1798fec_resume(struct device *dev)
ead73183 1799{
87cad5c3 1800 struct net_device *ndev = dev_get_drvdata(dev);
04e5216d 1801 struct fec_enet_private *fep = netdev_priv(ndev);
ead73183 1802
f4d40de3
SH
1803 clk_prepare_enable(fep->clk_ahb);
1804 clk_prepare_enable(fep->clk_ipg);
04e5216d
UKK
1805 if (netif_running(ndev)) {
1806 fec_restart(ndev, fep->full_duplex);
1807 netif_device_attach(ndev);
ead73183 1808 }
04e5216d 1809
ead73183
SH
1810 return 0;
1811}
1812
59d4289b
DK
1813static const struct dev_pm_ops fec_pm_ops = {
1814 .suspend = fec_suspend,
1815 .resume = fec_resume,
1816 .freeze = fec_suspend,
1817 .thaw = fec_resume,
1818 .poweroff = fec_suspend,
1819 .restore = fec_resume,
1820};
87cad5c3 1821#endif
59d4289b 1822
ead73183
SH
1823static struct platform_driver fec_driver = {
1824 .driver = {
b5680e0b 1825 .name = DRIVER_NAME,
87cad5c3
EB
1826 .owner = THIS_MODULE,
1827#ifdef CONFIG_PM
1828 .pm = &fec_pm_ops,
1829#endif
ca2cc333 1830 .of_match_table = fec_dt_ids,
ead73183 1831 },
b5680e0b 1832 .id_table = fec_devtype,
87cad5c3 1833 .probe = fec_probe,
33897cc8 1834 .remove = fec_drv_remove,
ead73183
SH
1835};
1836
aaca2377 1837module_platform_driver(fec_driver);
1da177e4
LT
1838
1839MODULE_LICENSE("GPL");