/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK | \
                NETIF_MSG_TIMER | \
                NETIF_MSG_RX_ERR | \
                NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID   ((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS                  \
        [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

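/* Per-SoC register offset tables.  The register layout differs between the
 * GEther, RZ fast-Ether, R-Car, SH4 and SH3/SH2 variants, so each table maps
 * the generic register enum to the byte offset used by that family.
 * SH_ETH_OFFSET_DEFAULTS first marks every entry invalid via a designated
 * range initializer; the entries below then fill in the registers that
 * actually exist on the given SoC.
 */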
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        SH_ETH_OFFSET_DEFAULTS,

        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
        [EDRRR]         = 0x0410,
        [EESR]          = 0x0428,
        [EESIPR]        = 0x0430,
        [TDLAR]         = 0x0010,
        [TDFAR]         = 0x0014,
        [TDFXR]         = 0x0018,
        [TDFFR]         = 0x001c,
        [RDLAR]         = 0x0030,
        [RDFAR]         = 0x0034,
        [RDFXR]         = 0x0038,
        [RDFFR]         = 0x003c,
        [TRSCER]        = 0x0438,
        [RMFCR]         = 0x0440,
        [TFTR]          = 0x0448,
        [FDR]           = 0x0450,
        [RMCR]          = 0x0458,
        [RPADIR]        = 0x0460,
        [FCFTR]         = 0x0468,
        [CSMR]          = 0x04E4,

        [ECMR]          = 0x0500,
        [ECSR]          = 0x0510,
        [ECSIPR]        = 0x0518,
        [PIR]           = 0x0520,
        [PSR]           = 0x0528,
        [PIPR]          = 0x052c,
        [RFLR]          = 0x0508,
        [APR]           = 0x0554,
        [MPR]           = 0x0558,
        [PFTCR]         = 0x055c,
        [PFRCR]         = 0x0560,
        [TPAUSER]       = 0x0564,
        [GECMR]         = 0x05b0,
        [BCULR]         = 0x05b4,
        [MAHR]          = 0x05c0,
        [MALR]          = 0x05c8,
        [TROCR]         = 0x0700,
        [CDCR]          = 0x0708,
        [LCCR]          = 0x0710,
        [CEFCR]         = 0x0740,
        [FRECR]         = 0x0748,
        [TSFRCR]        = 0x0750,
        [TLFRCR]        = 0x0758,
        [RFCR]          = 0x0760,
        [CERCR]         = 0x0768,
        [CEECR]         = 0x0770,
        [MAFCR]         = 0x0778,
        [RMII_MII]      = 0x0790,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAG0]     = 0x0040,
        [TSU_QTAG1]     = 0x0044,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_VTAG0]     = 0x0058,
        [TSU_VTAG1]     = 0x005c,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
        SH_ETH_OFFSET_DEFAULTS,

        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
        [EDRRR]         = 0x0410,
        [EESR]          = 0x0428,
        [EESIPR]        = 0x0430,
        [TDLAR]         = 0x0010,
        [TDFAR]         = 0x0014,
        [TDFXR]         = 0x0018,
        [TDFFR]         = 0x001c,
        [RDLAR]         = 0x0030,
        [RDFAR]         = 0x0034,
        [RDFXR]         = 0x0038,
        [RDFFR]         = 0x003c,
        [TRSCER]        = 0x0438,
        [RMFCR]         = 0x0440,
        [TFTR]          = 0x0448,
        [FDR]           = 0x0450,
        [RMCR]          = 0x0458,
        [RPADIR]        = 0x0460,
        [FCFTR]         = 0x0468,
        [CSMR]          = 0x04E4,

        [ECMR]          = 0x0500,
        [RFLR]          = 0x0508,
        [ECSR]          = 0x0510,
        [ECSIPR]        = 0x0518,
        [PIR]           = 0x0520,
        [APR]           = 0x0554,
        [MPR]           = 0x0558,
        [PFTCR]         = 0x055c,
        [PFRCR]         = 0x0560,
        [TPAUSER]       = 0x0564,
        [MAHR]          = 0x05c0,
        [MALR]          = 0x05c8,
        [CEFCR]         = 0x0740,
        [FRECR]         = 0x0748,
        [TSFRCR]        = 0x0750,
        [TLFRCR]        = 0x0758,
        [RFCR]          = 0x0760,
        [MAFCR]         = 0x0778,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWSLC]     = 0x0038,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
        SH_ETH_OFFSET_DEFAULTS,

        [ECMR]          = 0x0300,
        [RFLR]          = 0x0308,
        [ECSR]          = 0x0310,
        [ECSIPR]        = 0x0318,
        [PIR]           = 0x0320,
        [PSR]           = 0x0328,
        [RDMLR]         = 0x0340,
        [IPGR]          = 0x0350,
        [APR]           = 0x0354,
        [MPR]           = 0x0358,
        [RFCF]          = 0x0360,
        [TPAUSER]       = 0x0364,
        [TPAUSECR]      = 0x0368,
        [MAHR]          = 0x03c0,
        [MALR]          = 0x03c8,
        [TROCR]         = 0x03d0,
        [CDCR]          = 0x03d4,
        [LCCR]          = 0x03d8,
        [CNDCR]         = 0x03dc,
        [CEFCR]         = 0x03e4,
        [FRECR]         = 0x03e8,
        [TSFRCR]        = 0x03ec,
        [TLFRCR]        = 0x03f0,
        [RFCR]          = 0x03f4,
        [MAFCR]         = 0x03f8,

        [EDMR]          = 0x0200,
        [EDTRR]         = 0x0208,
        [EDRRR]         = 0x0210,
        [TDLAR]         = 0x0218,
        [RDLAR]         = 0x0220,
        [EESR]          = 0x0228,
        [EESIPR]        = 0x0230,
        [TRSCER]        = 0x0238,
        [RMFCR]         = 0x0240,
        [TFTR]          = 0x0248,
        [FDR]           = 0x0250,
        [RMCR]          = 0x0258,
        [TFUCR]         = 0x0264,
        [RFOCR]         = 0x0268,
        [RMIIMODE]      = 0x026c,
        [FCFTR]         = 0x0270,
        [TRIMD]         = 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
        SH_ETH_OFFSET_DEFAULTS,

        [ECMR]          = 0x0100,
        [RFLR]          = 0x0108,
        [ECSR]          = 0x0110,
        [ECSIPR]        = 0x0118,
        [PIR]           = 0x0120,
        [PSR]           = 0x0128,
        [RDMLR]         = 0x0140,
        [IPGR]          = 0x0150,
        [APR]           = 0x0154,
        [MPR]           = 0x0158,
        [TPAUSER]       = 0x0164,
        [RFCF]          = 0x0160,
        [TPAUSECR]      = 0x0168,
        [BCFRR]         = 0x016c,
        [MAHR]          = 0x01c0,
        [MALR]          = 0x01c8,
        [TROCR]         = 0x01d0,
        [CDCR]          = 0x01d4,
        [LCCR]          = 0x01d8,
        [CNDCR]         = 0x01dc,
        [CEFCR]         = 0x01e4,
        [FRECR]         = 0x01e8,
        [TSFRCR]        = 0x01ec,
        [TLFRCR]        = 0x01f0,
        [RFCR]          = 0x01f4,
        [MAFCR]         = 0x01f8,
        [RTRATE]        = 0x01fc,

        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0008,
        [EDRRR]         = 0x0010,
        [TDLAR]         = 0x0018,
        [RDLAR]         = 0x0020,
        [EESR]          = 0x0028,
        [EESIPR]        = 0x0030,
        [TRSCER]        = 0x0038,
        [RMFCR]         = 0x0040,
        [TFTR]          = 0x0048,
        [FDR]           = 0x0050,
        [RMCR]          = 0x0058,
        [TFUCR]         = 0x0064,
        [RFOCR]         = 0x0068,
        [FCFTR]         = 0x0070,
        [RPADIR]        = 0x0078,
        [TRIMD]         = 0x007c,
        [RBWAR]         = 0x00c8,
        [RDFAR]         = 0x00cc,
        [TBRAR]         = 0x00d4,
        [TDFAR]         = 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        SH_ETH_OFFSET_DEFAULTS,

        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0004,
        [EDRRR]         = 0x0008,
        [TDLAR]         = 0x000c,
        [RDLAR]         = 0x0010,
        [EESR]          = 0x0014,
        [EESIPR]        = 0x0018,
        [TRSCER]        = 0x001c,
        [RMFCR]         = 0x0020,
        [TFTR]          = 0x0024,
        [FDR]           = 0x0028,
        [RMCR]          = 0x002c,
        [EDOCR]         = 0x0030,
        [FCFTR]         = 0x0034,
        [RPADIR]        = 0x0038,
        [TRIMD]         = 0x003c,
        [RBWAR]         = 0x0040,
        [RDFAR]         = 0x0044,
        [TBRAR]         = 0x004c,
        [TDFAR]         = 0x0050,

        [ECMR]          = 0x0160,
        [ECSR]          = 0x0164,
        [ECSIPR]        = 0x0168,
        [PIR]           = 0x016c,
        [MAHR]          = 0x0170,
        [MALR]          = 0x0174,
        [RFLR]          = 0x0178,
        [PSR]           = 0x017c,
        [TROCR]         = 0x0180,
        [CDCR]          = 0x0184,
        [LCCR]          = 0x0188,
        [CNDCR]         = 0x018c,
        [CEFCR]         = 0x0194,
        [FRECR]         = 0x0198,
        [TSFRCR]        = 0x019c,
        [TLFRCR]        = 0x01a0,
        [RFCR]          = 0x01a4,
        [MAFCR]         = 0x01a8,
        [IPGR]          = 0x01b4,
        [APR]           = 0x01b8,
        [MPR]           = 0x01bc,
        [TPAUSER]       = 0x01c4,
        [BCFR]          = 0x01cc,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAGM0]    = 0x0040,
        [TSU_QTAGM1]    = 0x0044,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,

        [TSU_ADRH0]     = 0x0100,
};

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

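/* Register accessors.  All register I/O goes through these helpers, which
 * translate a generic register enum into the per-SoC byte offset via
 * mdp->reg_offset.  Accessing a register that does not exist on the current
 * SoC triggers a WARN and is otherwise ignored (reads return ~0U).
 */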
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u16 offset = mdp->reg_offset[enum_index];

        if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
                return;

        iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u16 offset = mdp->reg_offset[enum_index];

        if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
                return ~0U;

        return ioread32(mdp->addr + offset);
}

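/* Read-modify-write helper: clear the 'clear' bits, then set the 'set'
 * bits.  A typical use, taken from sh_eth_set_duplex() below, is:
 *
 *      sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
 */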
static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
                          u32 set)
{
        sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
                     enum_index);
}

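/* The register offset table doubles as a chip-family identifier: comparing
 * the table pointer tells whether we are driving a GEther or an RZ
 * fast-Ether core.
 */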
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_fast_rz;
}

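/* Select the PHY interface mode (GMII/MII/RMII) via the RMII_MII register
 * on SoCs that support more than one mode.
 */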
static void sh_eth_select_mii(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 value;

        switch (mdp->phy_interface) {
        case PHY_INTERFACE_MODE_GMII:
                value = 0x2;
                break;
        case PHY_INTERFACE_MODE_MII:
                value = 0x1;
                break;
        case PHY_INTERFACE_MODE_RMII:
                value = 0x0;
                break;
        default:
                netdev_warn(ndev,
                            "PHY interface mode was not set up; defaulting to MII\n");
                value = 0x1;
                break;
        }

        sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
        mdelay(1);
}

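/* Program the GEther MAC's speed (10/100/1000 Mbps) into GECMR to match the
 * negotiated link speed.
 */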
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        }
}

#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,

        .register_type  = SH_ETH_REG_FAST_RZ,

        .ecsr_value     = ECSR_ICD,
        .ecsipr_value   = ECSIPR_ICDIP,
        .eesipr_value   = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
                          EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
                          EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_RMAFIP | EESIPR_RRFIP |
                          EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE,
        .fdr_value      = 0x0000070f,

        .no_psr         = 1,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .hw_checksum    = 1,
        .tsu            = 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
        sh_eth_chip_reset(ndev);

        sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
        .chip_reset     = sh_eth_chip_reset_r8a7740,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
                          EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
                          EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE,
        .fdr_value      = 0x0000070f,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .hw_checksum    = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .magic          = 1,
};

/* CPU-dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
                break;
        case 100: /* 100BASE */
                sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
                break;
        }
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_rcar,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_RMAFIP | EESIPR_RRFIP |
                          EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
        .fdr_value      = 0x00000f0f,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_rcar,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
                          ECSIPR_MPDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_RMAFIP | EESIPR_RRFIP |
                          EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
        .fdr_value      = 0x00000f0f,

        .trscer_err_mask = DESC_I_RINT8,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
        .magic          = 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
                break;
        case 100: /* 100BASE */
                sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
                break;
        }
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7724,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_RMAFIP | EESIPR_RRFIP |
                          EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
        }
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7757,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
                          EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
                          EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .rtrate         = 1,
};

#define SH_GIGA_ETH_BASE        0xfee00000UL
#define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
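/* The ARSTR chip reset would wipe the MAC address registers of both GETHERC
 * ports, so save MAHR/MALR beforehand and restore them afterwards.
 */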
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
        u32 mahr[2], malr[2];
        int i;

        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
                malr[i] = ioread32((void *)GIGA_MALR(i));
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }

        sh_eth_chip_reset(ndev);

        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
                iowrite32(malr[i], (void *)GIGA_MALR(i));
                iowrite32(mahr[i], (void *)GIGA_MAHR(i));
        }
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0x00000000, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 0x00000010, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
        }
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
                          EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
                          EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE,
        .fdr_value      = 0x0000072f,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
                          EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .hw_checksum    = 1,
        .select_mii     = 1,
        .magic          = 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
                          EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .irq_flags      = IRQF_SHARED,
        .magic          = 1,
};

static struct sh_eth_cpu_data sh7619_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
                          EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
                          EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

static struct sh_eth_cpu_data sh771x_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
                          EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
                          EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
                          0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
                          EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
                          EESIPR_CEEFIP | EESIPR_CELFIP |
                          EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
                          EESIPR_PREIP | EESIPR_CERFIP,
        .tsu            = 1,
};

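/* Fill in defaults for any sh_eth_cpu_data fields the per-SoC definition
 * left at zero.
 */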
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
                                  DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

        if (!cd->trscer_err_mask)
                cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

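/* Wait for the self-clearing software-reset bit in EDMR to clear, polling
 * once per millisecond for up to 100 ms.
 */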
static int sh_eth_check_reset(struct net_device *ndev)
{
        int cnt;

        for (cnt = 100; cnt > 0; cnt--) {
                if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
                        return 0;
                mdelay(1);
        }

        netdev_err(ndev, "Device reset failed\n");
        return -ETIMEDOUT;
}

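/* Soft-reset the controller.  GEther and RZ fast-Ether cores use the
 * self-clearing EDMR_SRST_GETHER bit and need their descriptor-table
 * registers reinitialized afterwards; other cores just pulse
 * EDMR_SRST_ETHER.
 */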
static int sh_eth_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;

        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
                sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

                ret = sh_eth_check_reset(ndev);
                if (ret)
                        return ret;

                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
                sh_eth_write(ndev, 0x0, TDFAR);
                sh_eth_write(ndev, 0x0, TDFXR);
                sh_eth_write(ndev, 0x0, TDFFR);
                sh_eth_write(ndev, 0x0, RDLAR);
                sh_eth_write(ndev, 0x0, RDFAR);
                sh_eth_write(ndev, 0x0, RDFXR);
                sh_eth_write(ndev, 0x0, RDFFR);

                /* Reset HW CRC register */
                if (mdp->cd->hw_checksum)
                        sh_eth_write(ndev, 0x0, CSMR);

                /* Select MII mode */
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
                sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
                mdelay(3);
                sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
        }

        return ret;
}

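/* Advance skb->data to the next SH_ETH_RX_ALIGN boundary so the Rx buffer
 * start meets the alignment the Rx DMA expects.
 */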
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
        sh_eth_write(ndev,
                     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
        sh_eth_write(ndev,
                     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g).  To use this device, a MAC address must be set
 * in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                u32 mahr = sh_eth_read(ndev, MAHR);
                u32 malr = sh_eth_read(ndev, MALR);

                ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
                ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
                ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
                ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
                ndev->dev_addr[4] = (malr >>  8) & 0xFF;
                ndev->dev_addr[5] = (malr >>  0) & 0xFF;
        }
}

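/* The value written to EDTRR to kick off transmission differs between
 * GEther-type and Ether-type cores.
 */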
static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
                return EDTRR_TRNS_GETHER;
        else
                return EDTRR_TRNS_ETHER;
}

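/* Bit-banged MDIO support: the MII management pins are driven through the
 * PIR register.  set_gate is an optional platform hook invoked before each
 * access to route the shared pins to this controller.
 */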
struct bb_info {
        void (*set_gate)(void *addr);
        struct mdiobb_ctrl ctrl;
        void *addr;
};

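/* Set or clear one PIR bit with a read-modify-write cycle */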
static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
        u32 pir;

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        pir = ioread32(bitbang->addr);
        if (set)
                pir |=  mask;
        else
                pir &= ~mask;
        iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};

/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry;
        bool sent;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % mdp->num_tx_ring;
                txdesc = &mdp->tx_ring[entry];
                sent = !(txdesc->status & cpu_to_le32(TD_TACT));
                if (sent_only && !sent)
                        break;
                /* TACT bit must be checked before all the following reads */
                dma_rmb();
                netif_info(mdp, tx_done, ndev,
                           "tx entry %d status 0x%08x\n",
                           entry, le32_to_cpu(txdesc->status));
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&mdp->pdev->dev,
                                         le32_to_cpu(txdesc->addr),
                                         le32_to_cpu(txdesc->len) >> 16,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_le32(TD_TFP);
                if (entry >= mdp->num_tx_ring - 1)
                        txdesc->status |= cpu_to_le32(TD_TDLE);

                if (sent) {
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
                }
        }
        return free_num;
}

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ringsize, i;

        if (mdp->rx_ring) {
                for (i = 0; i < mdp->num_rx_ring; i++) {
                        if (mdp->rx_skbuff[i]) {
                                struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

                                dma_unmap_single(&mdp->pdev->dev,
                                                 le32_to_cpu(rxdesc->addr),
                                                 ALIGN(mdp->rx_buf_sz, 32),
                                                 DMA_FROM_DEVICE);
                        }
                }
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
                dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
                        dev_kfree_skb(mdp->rx_skbuff[i]);
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;

        if (mdp->tx_ring) {
                sh_eth_tx_free(ndev, false);

                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }

        /* Free Tx skb ringbuffer */
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
        dma_addr_t dma_addr;
        u32 buf_len;

        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
        mdp->dirty_rx = 0;
        mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);

                /* The size of the buffer is a multiple of 32 bytes. */
                buf_len = ALIGN(mdp->rx_buf_sz, 32);
                dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
                mdp->rx_skbuff[i] = skb;

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                rxdesc->len = cpu_to_le32(buf_len << 16);
                rxdesc->addr = cpu_to_le32(dma_addr);
                rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }

        mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

        /* Mark the last entry as wrapping the ring. */
        if (rxdesc)
                rxdesc->status |= cpu_to_le32(RD_RDLE);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < mdp->num_tx_ring; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_le32(TD_TFP);
                txdesc->len = cpu_to_le32(0);
                if (i == 0) {
                        /* Tx descriptor address set */
                        sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }

        txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize;

        /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
                                 GFP_KERNEL);
        if (!mdp->rx_skbuff)
                return -ENOMEM;

        mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
                                 GFP_KERNEL);
        if (!mdp->tx_skbuff)
                goto ring_free;

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
        mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
                                          &mdp->rx_desc_dma, GFP_KERNEL);
        if (!mdp->rx_ring)
                goto ring_free;

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
        mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
                                          &mdp->tx_desc_dma, GFP_KERNEL);
        if (!mdp->tx_ring)
                goto ring_free;
        return 0;

ring_free:
        /* Free Rx and Tx skb ring buffer and DMA buffer */
        sh_eth_ring_free(ndev);

        return -ENOMEM;
}

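/* Bring the hardware to a fully initialized state: soft reset, descriptor
 * ring setup, interrupt masking/unmasking, FIFO and frame-length limits,
 * MAC address and E-MAC configuration, and finally start Rx DMA.
 */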
static int sh_eth_dev_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        /* Soft Reset */
        ret = sh_eth_reset(ndev);
        if (ret)
                return ret;

        if (mdp->cd->rmiimode)
                sh_eth_write(ndev, 0x1, RMIIMODE);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
        if (mdp->cd->hw_swap)
                sh_eth_write(ndev, EDMR_EL, EDMR);
        else
#endif
                sh_eth_write(ndev, 0, EDMR);

        /* FIFO size set */
        sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
        sh_eth_write(ndev, 0, TFTR);

        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);

        sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */

        sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

        if (!mdp->cd->no_trimd)
                sh_eth_write(ndev, 0, TRIMD);

        /* Recv frame limit set register */
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);

        sh_eth_modify(ndev, EESR, 0, 0);
        mdp->irq_enabled = true;
        sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

        /* PAUSE Prohibition */
        sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
                     ECMR_TE | ECMR_RE, ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

        /* E-MAC Interrupt Enable register */
        sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
                sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

        /* Setting the Rx mode will start the Rx process. */
        sh_eth_write(ndev, EDRRR_R, EDRRR);

        return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Deactivate all TX descriptors, so DMA should stop at next
         * packet boundary if it's currently running
         */
        for (i = 0; i < mdp->num_tx_ring; i++)
                mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

        /* Disable TX FIFO egress to MAC */
        sh_eth_rcv_snd_disable(ndev);

        /* Stop RX DMA at next packet boundary */
        sh_eth_write(ndev, 0, EDRRR);

        /* Aside from TX DMA, we can't tell when the hardware is
         * really stopped, so we need to reset to make sure.
         * Before doing that, wait for long enough to *probably*
         * finish transmitting the last packet and poll stats.
         */
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);

        /* Set MAC address again */
        update_mac_address(ndev);
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % mdp->num_rx_ring;
        int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
        int limit;
        struct sk_buff *skb;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
        dma_addr_t dma_addr;
        u16 pkt_len;
        u32 buf_len;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
                /* RACT bit must be checked before all the following reads */
                dma_rmb();
                desc_status = le32_to_cpu(rxdesc->status);
                pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

                if (--boguscnt < 0)
                        break;

                netif_info(mdp, rx_status, ndev,
                           "rx entry %d status 0x%08x len %d\n",
                           entry, desc_status, pkt_len);

                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;

                /* On almost all GEther/Ether cores, the Receive Frame State
                 * (RFS) bits in receive descriptor 0 occupy bits 9..0.
                 * On the R8A7740 and R7S72100, however, they occupy bits
                 * 25..16, so the driver needs to shift them right by 16.
                 */
1498                 if (mdp->cd->hw_checksum)
1499                         desc_status >>= 16;
1500
1501                 skb = mdp->rx_skbuff[entry];
1502                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1503                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1504                         ndev->stats.rx_errors++;
1505                         if (desc_status & RD_RFS1)
1506                                 ndev->stats.rx_crc_errors++;
1507                         if (desc_status & RD_RFS2)
1508                                 ndev->stats.rx_frame_errors++;
1509                         if (desc_status & RD_RFS3)
1510                                 ndev->stats.rx_length_errors++;
1511                         if (desc_status & RD_RFS4)
1512                                 ndev->stats.rx_length_errors++;
1513                         if (desc_status & RD_RFS6)
1514                                 ndev->stats.rx_missed_errors++;
1515                         if (desc_status & RD_RFS10)
1516                                 ndev->stats.rx_over_errors++;
1517                 } else if (skb) {
1518                         dma_addr = le32_to_cpu(rxdesc->addr);
1519                         if (!mdp->cd->hw_swap)
1520                                 sh_eth_soft_swap(
1521                                         phys_to_virt(ALIGN(dma_addr, 4)),
1522                                         pkt_len + 2);
1523                         mdp->rx_skbuff[entry] = NULL;
1524                         if (mdp->cd->rpadir)
1525                                 skb_reserve(skb, NET_IP_ALIGN);
1526                         dma_unmap_single(&mdp->pdev->dev, dma_addr,
1527                                          ALIGN(mdp->rx_buf_sz, 32),
1528                                          DMA_FROM_DEVICE);
1529                         skb_put(skb, pkt_len);
1530                         skb->protocol = eth_type_trans(skb, ndev);
1531                         netif_receive_skb(skb);
1532                         ndev->stats.rx_packets++;
1533                         ndev->stats.rx_bytes += pkt_len;
1534                         if (desc_status & RD_RFS8)
1535                                 ndev->stats.multicast++;
1536                 }
1537                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1538                 rxdesc = &mdp->rx_ring[entry];
1539         }
1540
1541         /* Refill the Rx ring buffers. */
1542         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1543                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1544                 rxdesc = &mdp->rx_ring[entry];
1545                 /* The buffer size is rounded up to a 32-byte boundary. */
1546                 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1547                 rxdesc->len = cpu_to_le32(buf_len << 16);
1548
1549                 if (mdp->rx_skbuff[entry] == NULL) {
1550                         skb = netdev_alloc_skb(ndev, skbuff_size);
1551                         if (skb == NULL)
1552                                 break;  /* Better luck next round. */
1553                         sh_eth_set_receive_align(skb);
1554                         dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1555                                                   buf_len, DMA_FROM_DEVICE);
1556                         if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1557                                 kfree_skb(skb);
1558                                 break;
1559                         }
1560                         mdp->rx_skbuff[entry] = skb;
1561
1562                         skb_checksum_none_assert(skb);
1563                         rxdesc->addr = cpu_to_le32(dma_addr);
1564                 }
1565                 dma_wmb(); /* RACT bit must be set after all the above writes */
1566                 if (entry >= mdp->num_rx_ring - 1)
1567                         rxdesc->status |=
1568                                 cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
1569                 else
1570                         rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
1571         }
1572
1573         /* Restart Rx engine if stopped. */
1574         /* If we don't need to check status, don't. -KDU */
1575         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1576                 /* fix the values for the next receiving if RDE is set */
1577                 if (intr_status & EESR_RDE &&
1578                     mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) {
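                        /* Each Rx descriptor is 16 bytes, so the byte
                         * distance from RDLAR (ring base) to RDFAR (next
                         * descriptor to be fetched) shifted right by 4
                         * yields a descriptor index.
                         */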
1579                         u32 count = (sh_eth_read(ndev, RDFAR) -
1580                                      sh_eth_read(ndev, RDLAR)) >> 4;
1581
1582                         mdp->cur_rx = count;
1583                         mdp->dirty_rx = count;
1584                 }
1585                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1586         }
1587
1588         *quota -= limit - boguscnt - 1;
1589
1590         return *quota <= 0;
1591 }
1592
1593 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1594 {
1595         /* disable tx and rx */
1596         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1597 }
1598
1599 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1600 {
1601         /* enable tx and rx */
1602         sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1603 }
1604
1605 /* E-MAC interrupt handler */
1606 static void sh_eth_emac_interrupt(struct net_device *ndev)
1607 {
1608         struct sh_eth_private *mdp = netdev_priv(ndev);
1609         u32 felic_stat;
1610         u32 link_stat;
1611
1612         felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
1613         sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1614         if (felic_stat & ECSR_ICD)
1615                 ndev->stats.tx_carrier_errors++;
1616         if (felic_stat & ECSR_MPD)
1617                 pm_wakeup_event(&mdp->pdev->dev, 0);
1618         if (felic_stat & ECSR_LCHNG) {
1619                 /* Link Changed */
1620                 if (mdp->cd->no_psr || mdp->no_ether_link)
1621                         return;
1622                 link_stat = sh_eth_read(ndev, PSR);
1623                 if (mdp->ether_link_active_low)
1624                         link_stat = ~link_stat;
1625                 if (!(link_stat & PHY_ST_LINK)) {
1626                         sh_eth_rcv_snd_disable(ndev);
1627                 } else {
1628                         /* Link Up */
1629                         sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
1630                         /* clear int */
1631                         sh_eth_modify(ndev, ECSR, 0, 0);
1632                         sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
1633                         /* enable tx and rx */
1634                         sh_eth_rcv_snd_enable(ndev);
1635                 }
1636         }
1637 }
1638
1639 /* error control function */
1640 static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1641 {
1642         struct sh_eth_private *mdp = netdev_priv(ndev);
1643         u32 mask;
1644
1645         if (intr_status & EESR_TWB) {
1646                 /* Write-back done interrupt, used only to detect Transmit Aborts */
1647                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1648                         ndev->stats.tx_aborted_errors++;
1649                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1650                 }
1651         }
1652
1653         if (intr_status & EESR_RABT) {
1654                 /* Receive Abort int */
1655                 if (intr_status & EESR_RFRMER) {
1656                         /* Receive Frame Overflow int */
1657                         ndev->stats.rx_frame_errors++;
1658                 }
1659         }
1660
1661         if (intr_status & EESR_TDE) {
1662                 /* Transmit Descriptor Empty int */
1663                 ndev->stats.tx_fifo_errors++;
1664                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1665         }
1666
1667         if (intr_status & EESR_TFE) {
1668                 /* Transmit FIFO underflow */
1669                 ndev->stats.tx_fifo_errors++;
1670                 netif_err(mdp, tx_err, ndev, "Transmit FIFO underflow\n");
1671         }
1672
1673         if (intr_status & EESR_RDE) {
1674                 /* Receive Descriptor Empty int */
1675                 ndev->stats.rx_over_errors++;
1676         }
1677
1678         if (intr_status & EESR_RFE) {
1679                 /* Receive FIFO Overflow int */
1680                 ndev->stats.rx_fifo_errors++;
1681         }
1682
1683         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1684                 /* Address Error */
1685                 ndev->stats.tx_fifo_errors++;
1686                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1687         }
1688
1689         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1690         if (mdp->cd->no_ade)
1691                 mask &= ~EESR_ADE;
1692         if (intr_status & mask) {
1693                 /* Tx error */
1694                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1695
1696                 /* dmesg */
1697                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1698                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1699                            (u32)ndev->state, edtrr);
1700                 /* dirty buffer free */
1701                 sh_eth_tx_free(ndev, true);
1702
1703                 /* SH7712 BUG */
1704                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1705                         /* tx dma start */
1706                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1707                 }
1708                 /* wakeup */
1709                 netif_wake_queue(ndev);
1710         }
1711 }
1712
1713 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1714 {
1715         struct net_device *ndev = netdev;
1716         struct sh_eth_private *mdp = netdev_priv(ndev);
1717         struct sh_eth_cpu_data *cd = mdp->cd;
1718         irqreturn_t ret = IRQ_NONE;
1719         u32 intr_status, intr_enable;
1720
1721         spin_lock(&mdp->lock);
1722
1723         /* Get interrupt status */
1724         intr_status = sh_eth_read(ndev, EESR);
1725         /* Mask it with the interrupt mask, forcing the ECI interrupt to be
1726          * always enabled since it is the one that comes through regardless
1727          * of the mask, and we need to fully handle it in
1728          * sh_eth_emac_interrupt() in order to quench it, as it doesn't get
1729          * cleared by just writing 1 to the ECI bit.
1730          */
1731         intr_enable = sh_eth_read(ndev, EESIPR);
1732         intr_status &= intr_enable | EESIPR_ECIIP;
1733         if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
1734                            cd->eesr_err_check))
1735                 ret = IRQ_HANDLED;
1736         else
1737                 goto out;
1738
1739         if (unlikely(!mdp->irq_enabled)) {
1740                 sh_eth_write(ndev, 0, EESIPR);
1741                 goto out;
1742         }
1743
1744         if (intr_status & EESR_RX_CHECK) {
1745                 if (napi_schedule_prep(&mdp->napi)) {
1746                         /* Mask Rx interrupts */
1747                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1748                                      EESIPR);
1749                         __napi_schedule(&mdp->napi);
1750                 } else {
1751                         netdev_warn(ndev,
1752                                     "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1753                                     intr_status, intr_enable);
1754                 }
1755         }
1756
1757         /* Tx Check */
1758         if (intr_status & cd->tx_check) {
1759                 /* Clear Tx interrupts */
1760                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1761
1762                 sh_eth_tx_free(ndev, true);
1763                 netif_wake_queue(ndev);
1764         }
1765
1766         /* E-MAC interrupt */
1767         if (intr_status & EESR_ECI)
1768                 sh_eth_emac_interrupt(ndev);
1769
1770         if (intr_status & cd->eesr_err_check) {
1771                 /* Clear error interrupts */
1772                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1773
1774                 sh_eth_error(ndev, intr_status);
1775         }
1776
1777 out:
1778         spin_unlock(&mdp->lock);
1779
1780         return ret;
1781 }
1782
1783 static int sh_eth_poll(struct napi_struct *napi, int budget)
1784 {
1785         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1786                                                   napi);
1787         struct net_device *ndev = napi->dev;
1788         int quota = budget;
1789         u32 intr_status;
1790
1791         for (;;) {
1792                 intr_status = sh_eth_read(ndev, EESR);
1793                 if (!(intr_status & EESR_RX_CHECK))
1794                         break;
1795                 /* Clear Rx interrupts */
1796                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1797
1798                 if (sh_eth_rx(ndev, intr_status, &quota))
1799                         goto out;
1800         }
1801
1802         napi_complete(napi);
1803
1804         /* Reenable Rx interrupts */
1805         if (mdp->irq_enabled)
1806                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1807 out:
1808         return budget - quota;
1809 }
1810
1811 /* PHY state control function */
1812 static void sh_eth_adjust_link(struct net_device *ndev)
1813 {
1814         struct sh_eth_private *mdp = netdev_priv(ndev);
1815         struct phy_device *phydev = ndev->phydev;
1816         int new_state = 0;
1817
1818         if (phydev->link) {
1819                 if (phydev->duplex != mdp->duplex) {
1820                         new_state = 1;
1821                         mdp->duplex = phydev->duplex;
1822                         if (mdp->cd->set_duplex)
1823                                 mdp->cd->set_duplex(ndev);
1824                 }
1825
1826                 if (phydev->speed != mdp->speed) {
1827                         new_state = 1;
1828                         mdp->speed = phydev->speed;
1829                         if (mdp->cd->set_rate)
1830                                 mdp->cd->set_rate(ndev);
1831                 }
1832                 if (!mdp->link) {
1833                         sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1834                         new_state = 1;
1835                         mdp->link = phydev->link;
1836                         if (mdp->cd->no_psr || mdp->no_ether_link)
1837                                 sh_eth_rcv_snd_enable(ndev);
1838                 }
1839         } else if (mdp->link) {
1840                 new_state = 1;
1841                 mdp->link = 0;
1842                 mdp->speed = 0;
1843                 mdp->duplex = -1;
1844                 if (mdp->cd->no_psr || mdp->no_ether_link)
1845                         sh_eth_rcv_snd_disable(ndev);
1846         }
1847
1848         if (new_state && netif_msg_link(mdp))
1849                 phy_print_status(phydev);
1850 }
1851
1852 /* PHY init function */
1853 static int sh_eth_phy_init(struct net_device *ndev)
1854 {
1855         struct device_node *np = ndev->dev.parent->of_node;
1856         struct sh_eth_private *mdp = netdev_priv(ndev);
1857         struct phy_device *phydev;
1858
1859         mdp->link = 0;
1860         mdp->speed = 0;
1861         mdp->duplex = -1;
1862
1863         /* Try connect to PHY */
1864         if (np) {
1865                 struct device_node *pn;
1866
1867                 pn = of_parse_phandle(np, "phy-handle", 0);
1868                 phydev = of_phy_connect(ndev, pn,
1869                                         sh_eth_adjust_link, 0,
1870                                         mdp->phy_interface);
1871
1872                 of_node_put(pn);
1873                 if (!phydev)
1874                         phydev = ERR_PTR(-ENOENT);
1875         } else {
1876                 char phy_id[MII_BUS_ID_SIZE + 3];
1877
1878                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1879                          mdp->mii_bus->id, mdp->phy_id);
1880
1881                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1882                                      mdp->phy_interface);
1883         }
1884
1885         if (IS_ERR(phydev)) {
1886                 netdev_err(ndev, "failed to connect PHY\n");
1887                 return PTR_ERR(phydev);
1888         }
1889
1890         /* mask with MAC supported features */
1891         if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1892                 int err = phy_set_max_speed(phydev, SPEED_100);
1893                 if (err) {
1894                         netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1895                         phy_disconnect(phydev);
1896                         return err;
1897                 }
1898         }
1899
1900         phy_attached_info(phydev);
1901
1902         return 0;
1903 }
1904
1905 /* PHY control start function */
1906 static int sh_eth_phy_start(struct net_device *ndev)
1907 {
1908         int ret;
1909
1910         ret = sh_eth_phy_init(ndev);
1911         if (ret)
1912                 return ret;
1913
1914         phy_start(ndev->phydev);
1915
1916         return 0;
1917 }
1918
1919 static int sh_eth_get_link_ksettings(struct net_device *ndev,
1920                                      struct ethtool_link_ksettings *cmd)
1921 {
1922         struct sh_eth_private *mdp = netdev_priv(ndev);
1923         unsigned long flags;
1924
1925         if (!ndev->phydev)
1926                 return -ENODEV;
1927
1928         spin_lock_irqsave(&mdp->lock, flags);
1929         phy_ethtool_ksettings_get(ndev->phydev, cmd);
1930         spin_unlock_irqrestore(&mdp->lock, flags);
1931
1932         return 0;
1933 }
1934
1935 static int sh_eth_set_link_ksettings(struct net_device *ndev,
1936                                      const struct ethtool_link_ksettings *cmd)
1937 {
1938         struct sh_eth_private *mdp = netdev_priv(ndev);
1939         unsigned long flags;
1940         int ret;
1941
1942         if (!ndev->phydev)
1943                 return -ENODEV;
1944
1945         spin_lock_irqsave(&mdp->lock, flags);
1946
1947         /* disable tx and rx */
1948         sh_eth_rcv_snd_disable(ndev);
1949
1950         ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
1951         if (ret)
1952                 goto error_exit;
1953
1954         if (cmd->base.duplex == DUPLEX_FULL)
1955                 mdp->duplex = 1;
1956         else
1957                 mdp->duplex = 0;
1958
1959         if (mdp->cd->set_duplex)
1960                 mdp->cd->set_duplex(ndev);
1961
1962 error_exit:
1963         mdelay(1);
1964
1965         /* enable tx and rx */
1966         sh_eth_rcv_snd_enable(ndev);
1967
1968         spin_unlock_irqrestore(&mdp->lock, flags);
1969
1970         return ret;
1971 }
1972
1973 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
1974  * version must be bumped as well.  Just adding registers up to that
1975  * limit is fine, as long as the existing register indices don't
1976  * change.
1977  */
1978 #define SH_ETH_REG_DUMP_VERSION         1
1979 #define SH_ETH_REG_DUMP_MAX_REGS        256
1980
1981 static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
1982 {
1983         struct sh_eth_private *mdp = netdev_priv(ndev);
1984         struct sh_eth_cpu_data *cd = mdp->cd;
1985         u32 *valid_map;
1986         size_t len;
1987
1988         BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
1989
1990         /* Dump starts with a bitmap that tells ethtool which
1991          * registers are defined for this chip.
1992          */
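        /* With SH_ETH_REG_DUMP_MAX_REGS = 256, for example, the bitmap
         * occupies 256 / 32 = 8 32-bit words, so the register values
         * start at buf[8].
         */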
1993         len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
1994         if (buf) {
1995                 valid_map = buf;
1996                 buf += len;
1997         } else {
1998                 valid_map = NULL;
1999         }
2000
2001         /* Add a register to the dump, if it has a defined offset.
2002          * This automatically skips most undefined registers, but for
2003          * some it is also necessary to check a capability flag in
2004          * struct sh_eth_cpu_data.
2005          */
2006 #define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2007 #define add_reg_from(reg, read_expr) do {                               \
2008                 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {    \
2009                         if (buf) {                                      \
2010                                 mark_reg_valid(reg);                    \
2011                                 *buf++ = read_expr;                     \
2012                         }                                               \
2013                         ++len;                                          \
2014                 }                                                       \
2015         } while (0)
2016 #define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2017 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
2018
2019         add_reg(EDSR);
2020         add_reg(EDMR);
2021         add_reg(EDTRR);
2022         add_reg(EDRRR);
2023         add_reg(EESR);
2024         add_reg(EESIPR);
2025         add_reg(TDLAR);
2026         add_reg(TDFAR);
2027         add_reg(TDFXR);
2028         add_reg(TDFFR);
2029         add_reg(RDLAR);
2030         add_reg(RDFAR);
2031         add_reg(RDFXR);
2032         add_reg(RDFFR);
2033         add_reg(TRSCER);
2034         add_reg(RMFCR);
2035         add_reg(TFTR);
2036         add_reg(FDR);
2037         add_reg(RMCR);
2038         add_reg(TFUCR);
2039         add_reg(RFOCR);
2040         if (cd->rmiimode)
2041                 add_reg(RMIIMODE);
2042         add_reg(FCFTR);
2043         if (cd->rpadir)
2044                 add_reg(RPADIR);
2045         if (!cd->no_trimd)
2046                 add_reg(TRIMD);
2047         add_reg(ECMR);
2048         add_reg(ECSR);
2049         add_reg(ECSIPR);
2050         add_reg(PIR);
2051         if (!cd->no_psr)
2052                 add_reg(PSR);
2053         add_reg(RDMLR);
2054         add_reg(RFLR);
2055         add_reg(IPGR);
2056         if (cd->apr)
2057                 add_reg(APR);
2058         if (cd->mpr)
2059                 add_reg(MPR);
2060         add_reg(RFCR);
2061         add_reg(RFCF);
2062         if (cd->tpauser)
2063                 add_reg(TPAUSER);
2064         add_reg(TPAUSECR);
2065         add_reg(GECMR);
2066         if (cd->bculr)
2067                 add_reg(BCULR);
2068         add_reg(MAHR);
2069         add_reg(MALR);
2070         add_reg(TROCR);
2071         add_reg(CDCR);
2072         add_reg(LCCR);
2073         add_reg(CNDCR);
2074         add_reg(CEFCR);
2075         add_reg(FRECR);
2076         add_reg(TSFRCR);
2077         add_reg(TLFRCR);
2078         add_reg(CERCR);
2079         add_reg(CEECR);
2080         add_reg(MAFCR);
2081         if (cd->rtrate)
2082                 add_reg(RTRATE);
2083         if (cd->hw_checksum)
2084                 add_reg(CSMR);
2085         if (cd->select_mii)
2086                 add_reg(RMII_MII);
2087         if (cd->tsu) {
2088                 add_tsu_reg(ARSTR);
2089                 add_tsu_reg(TSU_CTRST);
2090                 add_tsu_reg(TSU_FWEN0);
2091                 add_tsu_reg(TSU_FWEN1);
2092                 add_tsu_reg(TSU_FCM);
2093                 add_tsu_reg(TSU_BSYSL0);
2094                 add_tsu_reg(TSU_BSYSL1);
2095                 add_tsu_reg(TSU_PRISL0);
2096                 add_tsu_reg(TSU_PRISL1);
2097                 add_tsu_reg(TSU_FWSL0);
2098                 add_tsu_reg(TSU_FWSL1);
2099                 add_tsu_reg(TSU_FWSLC);
2100                 add_tsu_reg(TSU_QTAG0);
2101                 add_tsu_reg(TSU_QTAG1);
2102                 add_tsu_reg(TSU_QTAGM0);
2103                 add_tsu_reg(TSU_QTAGM1);
2104                 add_tsu_reg(TSU_FWSR);
2105                 add_tsu_reg(TSU_FWINMK);
2106                 add_tsu_reg(TSU_ADQT0);
2107                 add_tsu_reg(TSU_ADQT1);
2108                 add_tsu_reg(TSU_VTAG0);
2109                 add_tsu_reg(TSU_VTAG1);
2110                 add_tsu_reg(TSU_ADSBSY);
2111                 add_tsu_reg(TSU_TEN);
2112                 add_tsu_reg(TSU_POST1);
2113                 add_tsu_reg(TSU_POST2);
2114                 add_tsu_reg(TSU_POST3);
2115                 add_tsu_reg(TSU_POST4);
2116                 if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) {
2117                         /* This is the start of a table, not just a single
2118                          * register.
2119                          */
2120                         if (buf) {
2121                                 unsigned int i;
2122
2123                                 mark_reg_valid(TSU_ADRH0);
2124                                 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2125                                         *buf++ = ioread32(
2126                                                 mdp->tsu_addr +
2127                                                 mdp->reg_offset[TSU_ADRH0] +
2128                                                 i * 4);
2129                         }
2130                         len += SH_ETH_TSU_CAM_ENTRIES * 2;
2131                 }
2132         }
2133
2134 #undef mark_reg_valid
2135 #undef add_reg_from
2136 #undef add_reg
2137 #undef add_tsu_reg
2138
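        /* len counts 32-bit words (bitmap + register values); ethtool wants bytes */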
2139         return len * 4;
2140 }
2141
2142 static int sh_eth_get_regs_len(struct net_device *ndev)
2143 {
2144         return __sh_eth_get_regs(ndev, NULL);
2145 }
2146
2147 static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2148                             void *buf)
2149 {
2150         struct sh_eth_private *mdp = netdev_priv(ndev);
2151
2152         regs->version = SH_ETH_REG_DUMP_VERSION;
2153
2154         pm_runtime_get_sync(&mdp->pdev->dev);
2155         __sh_eth_get_regs(ndev, buf);
2156         pm_runtime_put_sync(&mdp->pdev->dev);
2157 }
2158
2159 static int sh_eth_nway_reset(struct net_device *ndev)
2160 {
2161         struct sh_eth_private *mdp = netdev_priv(ndev);
2162         unsigned long flags;
2163         int ret;
2164
2165         if (!ndev->phydev)
2166                 return -ENODEV;
2167
2168         spin_lock_irqsave(&mdp->lock, flags);
2169         ret = phy_start_aneg(ndev->phydev);
2170         spin_unlock_irqrestore(&mdp->lock, flags);
2171
2172         return ret;
2173 }
2174
2175 static u32 sh_eth_get_msglevel(struct net_device *ndev)
2176 {
2177         struct sh_eth_private *mdp = netdev_priv(ndev);
2178         return mdp->msg_enable;
2179 }
2180
2181 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2182 {
2183         struct sh_eth_private *mdp = netdev_priv(ndev);
2184         mdp->msg_enable = value;
2185 }
2186
2187 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2188         "rx_current", "tx_current",
2189         "rx_dirty", "tx_dirty",
2190 };
2191 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2192
2193 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2194 {
2195         switch (sset) {
2196         case ETH_SS_STATS:
2197                 return SH_ETH_STATS_LEN;
2198         default:
2199                 return -EOPNOTSUPP;
2200         }
2201 }
2202
2203 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2204                                      struct ethtool_stats *stats, u64 *data)
2205 {
2206         struct sh_eth_private *mdp = netdev_priv(ndev);
2207         int i = 0;
2208
2209         /* device-specific stats */
2210         data[i++] = mdp->cur_rx;
2211         data[i++] = mdp->cur_tx;
2212         data[i++] = mdp->dirty_rx;
2213         data[i++] = mdp->dirty_tx;
2214 }
2215
2216 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2217 {
2218         switch (stringset) {
2219         case ETH_SS_STATS:
2220                 memcpy(data, *sh_eth_gstrings_stats,
2221                        sizeof(sh_eth_gstrings_stats));
2222                 break;
2223         }
2224 }
2225
2226 static void sh_eth_get_ringparam(struct net_device *ndev,
2227                                  struct ethtool_ringparam *ring)
2228 {
2229         struct sh_eth_private *mdp = netdev_priv(ndev);
2230
2231         ring->rx_max_pending = RX_RING_MAX;
2232         ring->tx_max_pending = TX_RING_MAX;
2233         ring->rx_pending = mdp->num_rx_ring;
2234         ring->tx_pending = mdp->num_tx_ring;
2235 }
2236
2237 static int sh_eth_set_ringparam(struct net_device *ndev,
2238                                 struct ethtool_ringparam *ring)
2239 {
2240         struct sh_eth_private *mdp = netdev_priv(ndev);
2241         int ret;
2242
2243         if (ring->tx_pending > TX_RING_MAX ||
2244             ring->rx_pending > RX_RING_MAX ||
2245             ring->tx_pending < TX_RING_MIN ||
2246             ring->rx_pending < RX_RING_MIN)
2247                 return -EINVAL;
2248         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2249                 return -EINVAL;
2250
2251         if (netif_running(ndev)) {
2252                 netif_device_detach(ndev);
2253                 netif_tx_disable(ndev);
2254
2255                 /* Serialise with the interrupt handler and NAPI, then
2256                  * disable interrupts.  We have to clear the
2257                  * irq_enabled flag first to ensure that interrupts
2258                  * won't be re-enabled.
2259                  */
2260                 mdp->irq_enabled = false;
2261                 synchronize_irq(ndev->irq);
2262                 napi_synchronize(&mdp->napi);
2263                 sh_eth_write(ndev, 0x0000, EESIPR);
2264
2265                 sh_eth_dev_exit(ndev);
2266
2267                 /* Free all the skbuffs in the Rx queue and the DMA buffers. */
2268                 sh_eth_ring_free(ndev);
2269         }
2270
2271         /* Set new parameters */
2272         mdp->num_rx_ring = ring->rx_pending;
2273         mdp->num_tx_ring = ring->tx_pending;
2274
2275         if (netif_running(ndev)) {
2276                 ret = sh_eth_ring_init(ndev);
2277                 if (ret < 0) {
2278                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2279                                    __func__);
2280                         return ret;
2281                 }
2282                 ret = sh_eth_dev_init(ndev);
2283                 if (ret < 0) {
2284                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2285                                    __func__);
2286                         return ret;
2287                 }
2288
2289                 netif_device_attach(ndev);
2290         }
2291
2292         return 0;
2293 }
2294
2295 static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2296 {
2297         struct sh_eth_private *mdp = netdev_priv(ndev);
2298
2299         wol->supported = 0;
2300         wol->wolopts = 0;
2301
2302         if (mdp->cd->magic) {
2303                 wol->supported = WAKE_MAGIC;
2304                 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2305         }
2306 }
2307
2308 static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2309 {
2310         struct sh_eth_private *mdp = netdev_priv(ndev);
2311
2312         if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2313                 return -EOPNOTSUPP;
2314
2315         mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2316
2317         device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2318
2319         return 0;
2320 }
2321
2322 static const struct ethtool_ops sh_eth_ethtool_ops = {
2323         .get_regs_len   = sh_eth_get_regs_len,
2324         .get_regs       = sh_eth_get_regs,
2325         .nway_reset     = sh_eth_nway_reset,
2326         .get_msglevel   = sh_eth_get_msglevel,
2327         .set_msglevel   = sh_eth_set_msglevel,
2328         .get_link       = ethtool_op_get_link,
2329         .get_strings    = sh_eth_get_strings,
2330         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2331         .get_sset_count     = sh_eth_get_sset_count,
2332         .get_ringparam  = sh_eth_get_ringparam,
2333         .set_ringparam  = sh_eth_set_ringparam,
2334         .get_link_ksettings = sh_eth_get_link_ksettings,
2335         .set_link_ksettings = sh_eth_set_link_ksettings,
2336         .get_wol        = sh_eth_get_wol,
2337         .set_wol        = sh_eth_set_wol,
2338 };
2339
2340 /* network device open function */
2341 static int sh_eth_open(struct net_device *ndev)
2342 {
2343         struct sh_eth_private *mdp = netdev_priv(ndev);
2344         int ret;
2345
2346         pm_runtime_get_sync(&mdp->pdev->dev);
2347
2348         napi_enable(&mdp->napi);
2349
2350         ret = request_irq(ndev->irq, sh_eth_interrupt,
2351                           mdp->cd->irq_flags, ndev->name, ndev);
2352         if (ret) {
2353                 netdev_err(ndev, "Can not assign IRQ number\n");
2354                 goto out_napi_off;
2355         }
2356
2357         /* Descriptor set */
2358         ret = sh_eth_ring_init(ndev);
2359         if (ret)
2360                 goto out_free_irq;
2361
2362         /* device init */
2363         ret = sh_eth_dev_init(ndev);
2364         if (ret)
2365                 goto out_free_irq;
2366
2367         /* PHY control start */
2368         ret = sh_eth_phy_start(ndev);
2369         if (ret)
2370                 goto out_free_irq;
2371
2372         netif_start_queue(ndev);
2373
2374         mdp->is_opened = 1;
2375
2376         return ret;
2377
2378 out_free_irq:
2379         free_irq(ndev->irq, ndev);
2380 out_napi_off:
2381         napi_disable(&mdp->napi);
2382         pm_runtime_put_sync(&mdp->pdev->dev);
2383         return ret;
2384 }
2385
2386 /* Timeout function */
2387 static void sh_eth_tx_timeout(struct net_device *ndev)
2388 {
2389         struct sh_eth_private *mdp = netdev_priv(ndev);
2390         struct sh_eth_rxdesc *rxdesc;
2391         int i;
2392
2393         netif_stop_queue(ndev);
2394
2395         netif_err(mdp, timer, ndev,
2396                   "transmit timed out, status %8.8x, resetting...\n",
2397                   sh_eth_read(ndev, EESR));
2398
2399         /* tx_errors count up */
2400         ndev->stats.tx_errors++;
2401
2402         /* Free all the skbuffs in the Rx and Tx queues. */
2403         for (i = 0; i < mdp->num_rx_ring; i++) {
2404                 rxdesc = &mdp->rx_ring[i];
2405                 rxdesc->status = cpu_to_le32(0);
2406                 rxdesc->addr = cpu_to_le32(0xBADF00D0);
2407                 dev_kfree_skb(mdp->rx_skbuff[i]);
2408                 mdp->rx_skbuff[i] = NULL;
2409         }
2410         for (i = 0; i < mdp->num_tx_ring; i++) {
2411                 dev_kfree_skb(mdp->tx_skbuff[i]);
2412                 mdp->tx_skbuff[i] = NULL;
2413         }
2414
2415         /* device init */
2416         sh_eth_dev_init(ndev);
2417
2418         netif_start_queue(ndev);
2419 }
2420
2421 /* Packet transmit function */
2422 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2423 {
2424         struct sh_eth_private *mdp = netdev_priv(ndev);
2425         struct sh_eth_txdesc *txdesc;
2426         dma_addr_t dma_addr;
2427         u32 entry;
2428         unsigned long flags;
2429
2430         spin_lock_irqsave(&mdp->lock, flags);
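        /* Treat the Tx ring as full once four or fewer slots remain free */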
2431         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2432                 if (!sh_eth_tx_free(ndev, true)) {
2433                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2434                         netif_stop_queue(ndev);
2435                         spin_unlock_irqrestore(&mdp->lock, flags);
2436                         return NETDEV_TX_BUSY;
2437                 }
2438         }
2439         spin_unlock_irqrestore(&mdp->lock, flags);
2440
2441         if (skb_put_padto(skb, ETH_ZLEN))
2442                 return NETDEV_TX_OK;
2443
2444         entry = mdp->cur_tx % mdp->num_tx_ring;
2445         mdp->tx_skbuff[entry] = skb;
2446         txdesc = &mdp->tx_ring[entry];
2447         /* soft swap. */
2448         if (!mdp->cd->hw_swap)
2449                 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2450         dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2451                                   DMA_TO_DEVICE);
2452         if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2453                 kfree_skb(skb);
2454                 return NETDEV_TX_OK;
2455         }
2456         txdesc->addr = cpu_to_le32(dma_addr);
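        /* The frame length goes in the upper 16 bits of the descriptor
         * length word, e.g. a (padded) 60-byte frame is stored as
         * 0x003c0000.
         */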
2457         txdesc->len  = cpu_to_le32(skb->len << 16);
2458
2459         dma_wmb(); /* TACT bit must be set after all the above writes */
2460         if (entry >= mdp->num_tx_ring - 1)
2461                 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2462         else
2463                 txdesc->status |= cpu_to_le32(TD_TACT);
2464
2465         mdp->cur_tx++;
2466
2467         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2468                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2469
2470         return NETDEV_TX_OK;
2471 }
2472
2473 /* The statistics registers have write-clear behaviour, which means we
2474  * will lose any increment between the read and write.  We mitigate
2475  * this by only clearing when we read a non-zero value, so we will
2476  * never falsely report a total of zero.
2477  */
2478 static void
2479 sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2480 {
2481         u32 delta = sh_eth_read(ndev, reg);
2482
2483         if (delta) {
2484                 *stat += delta;
2485                 sh_eth_write(ndev, 0, reg);
2486         }
2487 }
2488
2489 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2490 {
2491         struct sh_eth_private *mdp = netdev_priv(ndev);
2492
2493         if (sh_eth_is_rz_fast_ether(mdp))
2494                 return &ndev->stats;
2495
2496         if (!mdp->is_opened)
2497                 return &ndev->stats;
2498
2499         sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2500         sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2501         sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2502
2503         if (sh_eth_is_gether(mdp)) {
2504                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2505                                    CERCR);
2506                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2507                                    CEECR);
2508         } else {
2509                 sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2510                                    CNDCR);
2511         }
2512
2513         return &ndev->stats;
2514 }
2515
2516 /* device close function */
2517 static int sh_eth_close(struct net_device *ndev)
2518 {
2519         struct sh_eth_private *mdp = netdev_priv(ndev);
2520
2521         netif_stop_queue(ndev);
2522
2523         /* Serialise with the interrupt handler and NAPI, then disable
2524          * interrupts.  We have to clear the irq_enabled flag first to
2525          * ensure that interrupts won't be re-enabled.
2526          */
2527         mdp->irq_enabled = false;
2528         synchronize_irq(ndev->irq);
2529         napi_disable(&mdp->napi);
2530         sh_eth_write(ndev, 0x0000, EESIPR);
2531
2532         sh_eth_dev_exit(ndev);
2533
2534         /* PHY Disconnect */
2535         if (ndev->phydev) {
2536                 phy_stop(ndev->phydev);
2537                 phy_disconnect(ndev->phydev);
2538         }
2539
2540         free_irq(ndev->irq, ndev);
2541
2542         /* Free all the skbuffs in the Rx queue and the DMA buffer. */
2543         sh_eth_ring_free(ndev);
2544
2545         pm_runtime_put_sync(&mdp->pdev->dev);
2546
2547         mdp->is_opened = 0;
2548
2549         return 0;
2550 }
2551
2552 /* ioctl to device function */
2553 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2554 {
2555         struct phy_device *phydev = ndev->phydev;
2556
2557         if (!netif_running(ndev))
2558                 return -EINVAL;
2559
2560         if (!phydev)
2561                 return -ENODEV;
2562
2563         return phy_mii_ioctl(phydev, rq, cmd);
2564 }
2565
2566 static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2567 {
2568         if (netif_running(ndev))
2569                 return -EBUSY;
2570
2571         ndev->mtu = new_mtu;
2572         netdev_update_features(ndev);
2573
2574         return 0;
2575 }
2576
2577 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2578 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2579                                             int entry)
2580 {
2581         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2582 }
2583
2584 static u32 sh_eth_tsu_get_post_mask(int entry)
2585 {
2586         return 0x0f << (28 - ((entry % 8) * 4));
2587 }
2588
2589 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2590 {
2591         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2592 }
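
/* A worked example for the three helpers above: CAM entry 9 on port 1
 * lives in the second POST register (TSU_POST1 + 4), occupies the
 * nibble at bits 27:24 (post mask 0x0f000000), and its port 1 enable
 * bit within that nibble is 0x08 >> 2 = 0x02, i.e. 0x02000000.
 */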
2593
2594 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2595                                              int entry)
2596 {
2597         struct sh_eth_private *mdp = netdev_priv(ndev);
2598         u32 tmp;
2599         void *reg_offset;
2600
2601         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2602         tmp = ioread32(reg_offset);
2603         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2604 }
2605
2606 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2607                                               int entry)
2608 {
2609         struct sh_eth_private *mdp = netdev_priv(ndev);
2610         u32 post_mask, ref_mask, tmp;
2611         void *reg_offset;
2612
2613         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2614         post_mask = sh_eth_tsu_get_post_mask(entry);
2615         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2616
2617         tmp = ioread32(reg_offset);
2618         iowrite32(tmp & ~post_mask, reg_offset);
2619
2620         /* If other port enables, the function returns "true" */
2621         return tmp & ref_mask;
2622 }
2623
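/* Poll TSU_ADSBSY until the TSU address table is idle again.  The loop
 * polls every 10 us, so SH_ETH_TSU_TIMEOUT_MS * 100 iterations bound
 * the total wait at SH_ETH_TSU_TIMEOUT_MS milliseconds.
 */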
2624 static int sh_eth_tsu_busy(struct net_device *ndev)
2625 {
2626         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2627         struct sh_eth_private *mdp = netdev_priv(ndev);
2628
2629         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2630                 udelay(10);
2631                 timeout--;
2632                 if (timeout <= 0) {
2633                         netdev_err(ndev, "%s: timeout\n", __func__);
2634                         return -ETIMEDOUT;
2635                 }
2636         }
2637
2638         return 0;
2639 }
2640
2641 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2642                                   const u8 *addr)
2643 {
2644         u32 val;
2645
2646         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2647         iowrite32(val, reg);
2648         if (sh_eth_tsu_busy(ndev) < 0)
2649                 return -EBUSY;
2650
2651         val = addr[4] << 8 | addr[5];
2652         iowrite32(val, reg + 4);
2653         if (sh_eth_tsu_busy(ndev) < 0)
2654                 return -EBUSY;
2655
2656         return 0;
2657 }
2658
2659 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2660 {
2661         u32 val;
2662
2663         val = ioread32(reg);
2664         addr[0] = (val >> 24) & 0xff;
2665         addr[1] = (val >> 16) & 0xff;
2666         addr[2] = (val >> 8) & 0xff;
2667         addr[3] = val & 0xff;
2668         val = ioread32(reg + 4);
2669         addr[4] = (val >> 8) & 0xff;
2670         addr[5] = val & 0xff;
2671 }
2672
2674 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2675 {
2676         struct sh_eth_private *mdp = netdev_priv(ndev);
2677         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2678         int i;
2679         u8 c_addr[ETH_ALEN];
2680
2681         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2682                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2683                 if (ether_addr_equal(addr, c_addr))
2684                         return i;
2685         }
2686
2687         return -ENOENT;
2688 }
2689
2690 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2691 {
2692         u8 blank[ETH_ALEN];
2693         int entry;
2694
2695         memset(blank, 0, sizeof(blank));
2696         entry = sh_eth_tsu_find_entry(ndev, blank);
2697         return (entry < 0) ? -ENOMEM : entry;
2698 }
2699
2700 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2701                                               int entry)
2702 {
2703         struct sh_eth_private *mdp = netdev_priv(ndev);
2704         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2705         int ret;
2706         u8 blank[ETH_ALEN];
2707
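        /* TSU_TEN maps CAM entry N to bit (31 - N); clearing that bit
         * disables the entry, e.g. entry 0 is controlled by bit 31.
         */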
2708         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2709                          ~(1 << (31 - entry)), TSU_TEN);
2710
2711         memset(blank, 0, sizeof(blank));
2712         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2713         if (ret < 0)
2714                 return ret;
2715         return 0;
2716 }
2717
2718 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2719 {
2720         struct sh_eth_private *mdp = netdev_priv(ndev);
2721         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2722         int i, ret;
2723
2724         if (!mdp->cd->tsu)
2725                 return 0;
2726
2727         i = sh_eth_tsu_find_entry(ndev, addr);
2728         if (i < 0) {
2729                 /* No entry found, create one */
2730                 i = sh_eth_tsu_find_empty(ndev);
2731                 if (i < 0)
2732                         return -ENOMEM;
2733                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2734                 if (ret < 0)
2735                         return ret;
2736
2737                 /* Enable the entry */
2738                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2739                                  (1 << (31 - i)), TSU_TEN);
2740         }
2741
2742         /* Entry found or created, enable POST */
2743         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2744
2745         return 0;
2746 }
2747
2748 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2749 {
2750         struct sh_eth_private *mdp = netdev_priv(ndev);
2751         int i, ret;
2752
2753         if (!mdp->cd->tsu)
2754                 return 0;
2755
2756         i = sh_eth_tsu_find_entry(ndev, addr);
2757         if (i >= 0) {
2758                 /* Entry found */
2759                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2760                         goto done;
2761
2762                 /* Disable the entry if both ports have disabled it */
2763                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2764                 if (ret < 0)
2765                         return ret;
2766         }
2767 done:
2768         return 0;
2769 }
2770
2771 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2772 {
2773         struct sh_eth_private *mdp = netdev_priv(ndev);
2774         int i, ret;
2775
2776         if (!mdp->cd->tsu)
2777                 return 0;
2778
2779         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2780                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2781                         continue;
2782
2783                 /* Disable the entry if both ports have disabled it */
2784                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2785                 if (ret < 0)
2786                         return ret;
2787         }
2788
2789         return 0;
2790 }
2791
2792 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2793 {
2794         struct sh_eth_private *mdp = netdev_priv(ndev);
2795         u8 addr[ETH_ALEN];
2796         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2797         int i;
2798
2799         if (!mdp->cd->tsu)
2800                 return;
2801
2802         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2803                 sh_eth_tsu_read_entry(reg_offset, addr);
2804                 if (is_multicast_ether_addr(addr))
2805                         sh_eth_tsu_del_entry(ndev, addr);
2806         }
2807 }
2808
2809 /* Update promiscuous flag and multicast filter */
2810 static void sh_eth_set_rx_mode(struct net_device *ndev)
2811 {
2812         struct sh_eth_private *mdp = netdev_priv(ndev);
2813         u32 ecmr_bits;
2814         int mcast_all = 0;
2815         unsigned long flags;
2816
2817         spin_lock_irqsave(&mdp->lock, flags);
2818         /* Initial condition is MCT = 1, PRM = 0.
2819          * Depending on ndev->flags, set PRM or clear MCT
2820          */
2821         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2822         if (mdp->cd->tsu)
2823                 ecmr_bits |= ECMR_MCT;
2824
2825         if (!(ndev->flags & IFF_MULTICAST)) {
2826                 sh_eth_tsu_purge_mcast(ndev);
2827                 mcast_all = 1;
2828         }
2829         if (ndev->flags & IFF_ALLMULTI) {
2830                 sh_eth_tsu_purge_mcast(ndev);
2831                 ecmr_bits &= ~ECMR_MCT;
2832                 mcast_all = 1;
2833         }
2834
2835         if (ndev->flags & IFF_PROMISC) {
2836                 sh_eth_tsu_purge_all(ndev);
2837                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2838         } else if (mdp->cd->tsu) {
2839                 struct netdev_hw_addr *ha;
2840                 netdev_for_each_mc_addr(ha, ndev) {
2841                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2842                                 continue;
2843
2844                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2845                                 if (!mcast_all) {
2846                                         sh_eth_tsu_purge_mcast(ndev);
2847                                         ecmr_bits &= ~ECMR_MCT;
2848                                         mcast_all = 1;
2849                                 }
2850                         }
2851                 }
2852         }
2853
2854         /* update the ethernet mode */
2855         sh_eth_write(ndev, ecmr_bits, ECMR);
2856
2857         spin_unlock_irqrestore(&mdp->lock, flags);
2858 }
2859
2860 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2861 {
2862         if (!mdp->port)
2863                 return TSU_VTAG0;
2864         else
2865                 return TSU_VTAG1;
2866 }
2867
2868 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2869                                   __be16 proto, u16 vid)
2870 {
2871         struct sh_eth_private *mdp = netdev_priv(ndev);
2872         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2873
2874         if (unlikely(!mdp->cd->tsu))
2875                 return -EPERM;
2876
2877         /* No filtering if vid = 0 */
2878         if (!vid)
2879                 return 0;
2880
2881         mdp->vlan_num_ids++;
2882
2883         /* The controller has one VLAN tag HW filter. So, if the filter is
2884          * already enabled, the driver disables it and accepts all VLAN IDs.
2885          */
2886         if (mdp->vlan_num_ids > 1) {
2887                 /* disable VLAN filter */
2888                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2889                 return 0;
2890         }
2891
2892         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2893                          vtag_reg_index);
2894
2895         return 0;
2896 }
2897
2898 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2899                                    __be16 proto, u16 vid)
2900 {
2901         struct sh_eth_private *mdp = netdev_priv(ndev);
2902         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2903
2904         if (unlikely(!mdp->cd->tsu))
2905                 return -EPERM;
2906
2907         /* No filtering if vid = 0 */
2908         if (!vid)
2909                 return 0;
2910
2911         mdp->vlan_num_ids--;
2912         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2913
2914         return 0;
2915 }
2916
2917 /* SuperH's TSU register init function */
2918 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2919 {
2920         if (sh_eth_is_rz_fast_ether(mdp)) {
2921                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2922                 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
2923                                  TSU_FWSLC);    /* Enable POST registers */
2924                 return;
2925         }
2926
2927         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2928         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2929         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2930         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2931         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2932         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2933         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2934         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2935         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2936         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2937         if (sh_eth_is_gether(mdp)) {
2938                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2939                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2940         } else {
2941                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2942                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2943         }
2944         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2945         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2946         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2947         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2948         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2949         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2950         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2951 }
2952
2953 /* MDIO bus release function */
2954 static int sh_mdio_release(struct sh_eth_private *mdp)
2955 {
2956         /* unregister mdio bus */
2957         mdiobus_unregister(mdp->mii_bus);
2958
2959         /* free bitbang info */
2960         free_mdio_bitbang(mdp->mii_bus);
2961
2962         return 0;
2963 }
2964
2965 /* MDIO bus init function */
2966 static int sh_mdio_init(struct sh_eth_private *mdp,
2967                         struct sh_eth_plat_data *pd)
2968 {
2969         int ret;
2970         struct bb_info *bitbang;
2971         struct platform_device *pdev = mdp->pdev;
2972         struct device *dev = &mdp->pdev->dev;
2973
2974         /* create bit control struct for PHY */
2975         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2976         if (!bitbang)
2977                 return -ENOMEM;
2978
2979         /* bitbang init */
2980         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2981         bitbang->set_gate = pd->set_mdio_gate;
2982         bitbang->ctrl.ops = &bb_ops;
2983
2984         /* MII controller setting */
2985         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2986         if (!mdp->mii_bus)
2987                 return -ENOMEM;
2988
2989         /* Hook up MII support for ethtool */
2990         mdp->mii_bus->name = "sh_mii";
2991         mdp->mii_bus->parent = dev;
2992         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2993                  pdev->name, pdev->id);
2994
2995         /* register MDIO bus */
2996         if (dev->of_node) {
2997                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2998         } else {
2999                 if (pd->phy_irq > 0)
3000                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3001
3002                 ret = mdiobus_register(mdp->mii_bus);
3003         }
3004
3005         if (ret)
3006                 goto out_free_bus;
3007
3008         return 0;
3009
3010 out_free_bus:
3011         free_mdio_bitbang(mdp->mii_bus);
3012         return ret;
3013 }
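/* Example (illustrative sketch only; the callback body and structure
 * contents are hypothetical board-specific values): a non-DT board whose
 * MDIO lines sit behind a gate would supply the pieces consumed by
 * sh_mdio_init() through its platform data:
 *
 *	static void board_mdio_gate(void *addr)
 *	{
 *		// board-specific: steer the MDIO lines of the PIR block
 *		// at 'addr' to the wanted PHY
 *	}
 *
 *	static struct sh_eth_plat_data board_eth_pdata = {
 *		.phy		= 1,
 *		.phy_interface	= PHY_INTERFACE_MODE_MII,
 *		.set_mdio_gate	= board_mdio_gate,
 *	};
 */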
3014
3015 static const u16 *sh_eth_get_register_offset(int register_type)
3016 {
3017         const u16 *reg_offset = NULL;
3018
3019         switch (register_type) {
3020         case SH_ETH_REG_GIGABIT:
3021                 reg_offset = sh_eth_offset_gigabit;
3022                 break;
3023         case SH_ETH_REG_FAST_RZ:
3024                 reg_offset = sh_eth_offset_fast_rz;
3025                 break;
3026         case SH_ETH_REG_FAST_RCAR:
3027                 reg_offset = sh_eth_offset_fast_rcar;
3028                 break;
3029         case SH_ETH_REG_FAST_SH4:
3030                 reg_offset = sh_eth_offset_fast_sh4;
3031                 break;
3032         case SH_ETH_REG_FAST_SH3_SH2:
3033                 reg_offset = sh_eth_offset_fast_sh3_sh2;
3034                 break;
3035         }
3036
3037         return reg_offset;
3038 }
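/* The register accessors earlier in this file index into the table
 * returned above; a simplified sketch of that pattern (error handling
 * omitted):
 *
 *	u16 offset = mdp->reg_offset[EDMR];
 *
 *	if (offset != SH_ETH_OFFSET_INVALID)
 *		iowrite32(data, mdp->addr + offset);
 */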
3039
3040 static const struct net_device_ops sh_eth_netdev_ops = {
3041         .ndo_open               = sh_eth_open,
3042         .ndo_stop               = sh_eth_close,
3043         .ndo_start_xmit         = sh_eth_start_xmit,
3044         .ndo_get_stats          = sh_eth_get_stats,
3045         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3046         .ndo_tx_timeout         = sh_eth_tx_timeout,
3047         .ndo_do_ioctl           = sh_eth_do_ioctl,
3048         .ndo_change_mtu         = sh_eth_change_mtu,
3049         .ndo_validate_addr      = eth_validate_addr,
3050         .ndo_set_mac_address    = eth_mac_addr,
3051 };
3052
3053 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3054         .ndo_open               = sh_eth_open,
3055         .ndo_stop               = sh_eth_close,
3056         .ndo_start_xmit         = sh_eth_start_xmit,
3057         .ndo_get_stats          = sh_eth_get_stats,
3058         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
3059         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
3060         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
3061         .ndo_tx_timeout         = sh_eth_tx_timeout,
3062         .ndo_do_ioctl           = sh_eth_do_ioctl,
3063         .ndo_change_mtu         = sh_eth_change_mtu,
3064         .ndo_validate_addr      = eth_validate_addr,
3065         .ndo_set_mac_address    = eth_mac_addr,
3066 };
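/* The TSU variant above differs from sh_eth_netdev_ops only in the two
 * ndo_vlan_rx_{add,kill}_vid hooks, which are backed by the TSU VLAN tag
 * filter.
 */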
3067
3068 #ifdef CONFIG_OF
3069 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3070 {
3071         struct device_node *np = dev->of_node;
3072         struct sh_eth_plat_data *pdata;
3073         const char *mac_addr;
3074
3075         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3076         if (!pdata)
3077                 return NULL;
3078
3079         pdata->phy_interface = of_get_phy_mode(np);
3080
3081         mac_addr = of_get_mac_address(np);
3082         if (mac_addr)
3083                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3084
3085         pdata->no_ether_link =
3086                 of_property_read_bool(np, "renesas,no-ether-link");
3087         pdata->ether_link_active_low =
3088                 of_property_read_bool(np, "renesas,ether-link-active-low");
3089
3090         return pdata;
3091 }
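/* Example (illustrative only; see the sh_eth DT binding document for the
 * authoritative description) of a node this parser and the match table
 * below would consume:
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7790",
 *			     "renesas,rcar-gen2-ether";
 *		reg = <0 0xee700000 0 0x400>;
 *		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
 *		phy-mode = "rmii";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 */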
3092
3093 static const struct of_device_id sh_eth_match_table[] = {
3094         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3095         { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3096         { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3097         { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3098         { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3099         { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3100         { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3101         { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3102         { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3103         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3104         { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3105         { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3106         { }
3107 };
3108 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3109 #else
3110 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3111 {
3112         return NULL;
3113 }
3114 #endif
3115
3116 static int sh_eth_drv_probe(struct platform_device *pdev)
3117 {
3118         struct resource *res;
3119         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3120         const struct platform_device_id *id = platform_get_device_id(pdev);
3121         struct sh_eth_private *mdp;
3122         struct net_device *ndev;
3123         int ret;
3124
3125         /* get base addr */
3126         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3127
3128         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3129         if (!ndev)
3130                 return -ENOMEM;
3131
3132         pm_runtime_enable(&pdev->dev);
3133         pm_runtime_get_sync(&pdev->dev);
3134
3135         ret = platform_get_irq(pdev, 0);
3136         if (ret < 0)
3137                 goto out_release;
3138         ndev->irq = ret;
3139
3140         SET_NETDEV_DEV(ndev, &pdev->dev);
3141
3142         mdp = netdev_priv(ndev);
3143         mdp->num_tx_ring = TX_RING_SIZE;
3144         mdp->num_rx_ring = RX_RING_SIZE;
3145         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3146         if (IS_ERR(mdp->addr)) {
3147                 ret = PTR_ERR(mdp->addr);
3148                 goto out_release;
3149         }
3150
3151         ndev->base_addr = res->start;
3152
3153         spin_lock_init(&mdp->lock);
3154         mdp->pdev = pdev;
3155
3156         if (pdev->dev.of_node)
3157                 pd = sh_eth_parse_dt(&pdev->dev);
3158         if (!pd) {
3159                 dev_err(&pdev->dev, "no platform data\n");
3160                 ret = -EINVAL;
3161                 goto out_release;
3162         }
3163
3164         /* get PHY ID and link settings */
3165         mdp->phy_id = pd->phy;
3166         mdp->phy_interface = pd->phy_interface;
3167         mdp->no_ether_link = pd->no_ether_link;
3168         mdp->ether_link_active_low = pd->ether_link_active_low;
3169
3170         /* set cpu data */
3171         if (id)
3172                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3173         else
3174                 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3175
3176         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3177         if (!mdp->reg_offset) {
3178                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
3179                         mdp->cd->register_type);
3180                 ret = -EINVAL;
3181                 goto out_release;
3182         }
3183         sh_eth_set_default_cpu_data(mdp->cd);
3184
3185         /* User's manual states max MTU should be 2048 but due to the
3186          * alignment calculations in sh_eth_ring_init() the practical
3187          * MTU is a bit less. Maybe this can be optimized some more.
3188          */
3189         ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3190         ndev->min_mtu = ETH_MIN_MTU;
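        /* With ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4, the
         * expression above yields a max MTU of 2000 - 22 = 1978 bytes.
         */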
3191
3192         /* set network device operations */
3193         if (mdp->cd->tsu)
3194                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3195         else
3196                 ndev->netdev_ops = &sh_eth_netdev_ops;
3197         ndev->ethtool_ops = &sh_eth_ethtool_ops;
3198         ndev->watchdog_timeo = TX_TIMEOUT;
3199
3200         /* debug message level */
3201         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3202
3203         /* read and set MAC address */
3204         read_mac_address(ndev, pd->mac_addr);
3205         if (!is_valid_ether_addr(ndev->dev_addr)) {
3206                 dev_warn(&pdev->dev,
3207                          "no valid MAC address supplied, using a random one.\n");
3208                 eth_hw_addr_random(ndev);
3209         }
3210
3211         if (mdp->cd->tsu) {
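                /* Two controllers share one TSU; even platform device IDs
                 * map to TSU port 0, odd IDs to port 1, and a device with
                 * no ID is treated as port 0.
                 */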
3212                 int port = pdev->id < 0 ? 0 : pdev->id % 2;
3213                 struct resource *rtsu;
3214
3215                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3216                 if (!rtsu) {
3217                         dev_err(&pdev->dev, "no TSU resource\n");
3218                         ret = -ENODEV;
3219                         goto out_release;
3220                 }
3221                 /* We can only request the TSU region for the first of the
3222                  * two ports sharing this TSU for the probe to succeed...
3223                  */
3224                 if (port == 0 &&
3225                     !devm_request_mem_region(&pdev->dev, rtsu->start,
3226                                              resource_size(rtsu),
3227                                              dev_name(&pdev->dev))) {
3228                         dev_err(&pdev->dev, "can't request TSU resource.\n");
3229                         ret = -EBUSY;
3230                         goto out_release;
3231                 }
3232                 /* ioremap the TSU registers */
3233                 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3234                                              resource_size(rtsu));
3235                 if (!mdp->tsu_addr) {
3236                         dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3237                         ret = -ENOMEM;
3238                         goto out_release;
3239                 }
3240                 mdp->port = port;
3241                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3242
3243                 /* Need to init only the first port of the two sharing a TSU */
3244                 if (port == 0) {
3245                         if (mdp->cd->chip_reset)
3246                                 mdp->cd->chip_reset(ndev);
3247
3248                 /* TSU init (first port only) */
3249                         sh_eth_tsu_init(mdp);
3250                 }
3251         }
3252
3253         if (mdp->cd->rmiimode)
3254                 sh_eth_write(ndev, 0x1, RMIIMODE);
3255
3256         /* MDIO bus init */
3257         ret = sh_mdio_init(mdp, pd);
3258         if (ret) {
3259                 if (ret != -EPROBE_DEFER)
3260                         dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3261                 goto out_release;
3262         }
3263
3264         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3265
3266         /* register the network device */
3267         ret = register_netdev(ndev);
3268         if (ret)
3269                 goto out_napi_del;
3270
3271         if (mdp->cd->magic)
3272                 device_set_wakeup_capable(&pdev->dev, 1);
3273
3274         /* print device information */
3275         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3276                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3277
3278         pm_runtime_put(&pdev->dev);
3279         platform_set_drvdata(pdev, ndev);
3280
3281         return ret;
3282
3283 out_napi_del:
3284         netif_napi_del(&mdp->napi);
3285         sh_mdio_release(mdp);
3286
3287 out_release:
3288         /* free the network device */
3289         free_netdev(ndev);
3290
3291         pm_runtime_put(&pdev->dev);
3292         pm_runtime_disable(&pdev->dev);
3293         return ret;
3294 }
3295
3296 static int sh_eth_drv_remove(struct platform_device *pdev)
3297 {
3298         struct net_device *ndev = platform_get_drvdata(pdev);
3299         struct sh_eth_private *mdp = netdev_priv(ndev);
3300
3301         unregister_netdev(ndev);
3302         netif_napi_del(&mdp->napi);
3303         sh_mdio_release(mdp);
3304         pm_runtime_disable(&pdev->dev);
3305         free_netdev(ndev);
3306
3307         return 0;
3308 }
3309
3310 #ifdef CONFIG_PM
3311 #ifdef CONFIG_PM_SLEEP
3312 static int sh_eth_wol_setup(struct net_device *ndev)
3313 {
3314         struct sh_eth_private *mdp = netdev_priv(ndev);
3315
3316         /* Only allow ECI interrupts */
3317         synchronize_irq(ndev->irq);
3318         napi_disable(&mdp->napi);
3319         sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3320
3321         /* Enable MagicPacket */
3322         sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3323
3324         return enable_irq_wake(ndev->irq);
3325 }
3326
3327 static int sh_eth_wol_restore(struct net_device *ndev)
3328 {
3329         struct sh_eth_private *mdp = netdev_priv(ndev);
3330         int ret;
3331
3332         napi_enable(&mdp->napi);
3333
3334         /* Disable MagicPacket */
3335         sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3336
3337         /* The device needs to be reset to restore MagicPacket logic
3338          * for the next wakeup. Closing and reopening the device resets
3339          * the hardware and restores all registers, which is also what
3340          * happens during suspend and resume without WoL enabled.
3341          */
3342         ret = sh_eth_close(ndev);
3343         if (ret < 0)
3344                 return ret;
3345         ret = sh_eth_open(ndev);
3346         if (ret < 0)
3347                 return ret;
3348
3349         return disable_irq_wake(ndev->irq);
3350 }
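/* The WoL paths above are taken only when Wake-on-LAN has been enabled
 * beforehand from userspace, e.g. (interface name assumed):
 *
 *	ethtool -s eth0 wol g
 */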
3351
3352 static int sh_eth_suspend(struct device *dev)
3353 {
3354         struct net_device *ndev = dev_get_drvdata(dev);
3355         struct sh_eth_private *mdp = netdev_priv(ndev);
3356         int ret = 0;
3357
3358         if (!netif_running(ndev))
3359                 return 0;
3360
3361         netif_device_detach(ndev);
3362
3363         if (mdp->wol_enabled)
3364                 ret = sh_eth_wol_setup(ndev);
3365         else
3366                 ret = sh_eth_close(ndev);
3367
3368         return ret;
3369 }
3370
3371 static int sh_eth_resume(struct device *dev)
3372 {
3373         struct net_device *ndev = dev_get_drvdata(dev);
3374         struct sh_eth_private *mdp = netdev_priv(ndev);
3375         int ret = 0;
3376
3377         if (!netif_running(ndev))
3378                 return 0;
3379
3380         if (mdp->wol_enabled)
3381                 ret = sh_eth_wol_restore(ndev);
3382         else
3383                 ret = sh_eth_open(ndev);
3384
3385         if (ret < 0)
3386                 return ret;
3387
3388         netif_device_attach(ndev);
3389
3390         return ret;
3391 }
3392 #endif
3393
3394 static int sh_eth_runtime_nop(struct device *dev)
3395 {
3396         /* Runtime PM callback shared between ->runtime_suspend()
3397          * and ->runtime_resume(). Simply returns success.
3398          *
3399          * This driver re-initializes all registers after
3400          * pm_runtime_get_sync() anyway so there is no need
3401          * to save and restore registers here.
3402          */
3403         return 0;
3404 }
3405
3406 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3407         SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3408         SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3409 };
3410 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3411 #else
3412 #define SH_ETH_PM_OPS NULL
3413 #endif
3414
3415 static const struct platform_device_id sh_eth_id_table[] = {
3416         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3417         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3418         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3419         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3420         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3421         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3422         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3423         { }
3424 };
3425 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
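/* Example (board-code sketch; the resource and platform-data arrays are
 * hypothetical): a non-DT platform instantiates this driver by registering
 * a device whose name matches an entry in the table above, e.g.:
 *
 *	pdev = platform_device_register_resndata(NULL, "sh7724-ether", 0,
 *						 sh_eth_resources,
 *						 ARRAY_SIZE(sh_eth_resources),
 *						 &sh_eth_pdata,
 *						 sizeof(sh_eth_pdata));
 */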
3426
3427 static struct platform_driver sh_eth_driver = {
3428         .probe = sh_eth_drv_probe,
3429         .remove = sh_eth_drv_remove,
3430         .id_table = sh_eth_id_table,
3431         .driver = {
3432                    .name = CARDNAME,
3433                    .pm = SH_ETH_PM_OPS,
3434                    .of_match_table = of_match_ptr(sh_eth_match_table),
3435         },
3436 };
3437
3438 module_platform_driver(sh_eth_driver);
3439
3440 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3441 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3442 MODULE_LICENSE("GPL v2");