net: drop the weight argument from netif_napi_add
[linux-2.6-block.git] / drivers / net / ethernet / socionext / sni_ave.c
CommitLineData
4c270b55 1// SPDX-License-Identifier: GPL-2.0
40d9fca8 2/*
4c270b55
KH
3 * sni_ave.c - Socionext UniPhier AVE ethernet driver
4 * Copyright 2014 Panasonic Corporation
5 * Copyright 2015-2017 Socionext Inc.
6 */
7
8#include <linux/bitops.h>
9#include <linux/clk.h>
10#include <linux/etherdevice.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/iopoll.h>
57878f2f 14#include <linux/mfd/syscon.h>
4c270b55
KH
15#include <linux/mii.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/of_net.h>
19#include <linux/of_mdio.h>
20#include <linux/of_platform.h>
21#include <linux/phy.h>
57878f2f 22#include <linux/regmap.h>
4c270b55
KH
23#include <linux/reset.h>
24#include <linux/types.h>
25#include <linux/u64_stats_sync.h>
26
27/* General Register Group */
28#define AVE_IDR 0x000 /* ID */
29#define AVE_VR 0x004 /* Version */
30#define AVE_GRR 0x008 /* Global Reset */
31#define AVE_CFGR 0x00c /* Configuration */
32
33/* Interrupt Register Group */
34#define AVE_GIMR 0x100 /* Global Interrupt Mask */
35#define AVE_GISR 0x104 /* Global Interrupt Status */
36
37/* MAC Register Group */
38#define AVE_TXCR 0x200 /* TX Setup */
39#define AVE_RXCR 0x204 /* RX Setup */
40#define AVE_RXMAC1R 0x208 /* MAC address (lower) */
41#define AVE_RXMAC2R 0x20c /* MAC address (upper) */
42#define AVE_MDIOCTR 0x214 /* MDIO Control */
43#define AVE_MDIOAR 0x218 /* MDIO Address */
44#define AVE_MDIOWDR 0x21c /* MDIO Data */
45#define AVE_MDIOSR 0x220 /* MDIO Status */
46#define AVE_MDIORDR 0x224 /* MDIO Rd Data */
47
48/* Descriptor Control Register Group */
49#define AVE_DESCC 0x300 /* Descriptor Control */
50#define AVE_TXDC 0x304 /* TX Descriptor Configuration */
51#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */
52#define AVE_IIRQC 0x34c /* Interval IRQ Control */
53
54/* Packet Filter Register Group */
55#define AVE_PKTF_BASE 0x800 /* PF Base Address */
56#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */
57#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */
58#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */
59#define AVE_PFEN 0xffc /* Packet Filter Enable */
60#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40)
61#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8)
62#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4)
63#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4)
64
65/* 64bit descriptor memory */
66#define AVE_DESC_SIZE_64 12 /* Descriptor Size */
67
68#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */
69#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */
70
71#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */
72#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */
73
74/* 32bit descriptor memory */
75#define AVE_DESC_SIZE_32 8 /* Descriptor Size */
76
77#define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */
78#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */
79
80#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */
81#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */
82
83/* RMII Bridge Register Group */
84#define AVE_RSTCTRL 0x8028 /* Reset control */
85#define AVE_RSTCTRL_RMIIRST BIT(16)
86#define AVE_LINKSEL 0x8034 /* Link speed setting */
87#define AVE_LINKSEL_100M BIT(0)
88
89/* AVE_GRR */
90#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */
91#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */
92#define AVE_GRR_GRST BIT(0) /* Reset all MAC */
93
94/* AVE_CFGR */
95#define AVE_CFGR_FLE BIT(31) /* Filter Function */
96#define AVE_CFGR_CHE BIT(30) /* Checksum Function */
97#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */
98#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */
99
100/* AVE_GISR (common with GIMR) */
101#define AVE_GI_PHY BIT(24) /* PHY interrupt */
102#define AVE_GI_TX BIT(16) /* Tx complete */
103#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */
104#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */
105#define AVE_GI_RXDROP BIT(6) /* Drop packet */
106#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */
107
108/* AVE_TXCR */
109#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */
110#define AVE_TXCR_TXSPD_1G BIT(17)
111#define AVE_TXCR_TXSPD_100 BIT(16)
112
113/* AVE_RXCR */
114#define AVE_RXCR_RXEN BIT(30) /* Rx enable */
115#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */
116#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */
117#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */
118#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */
119#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0)
120
121/* AVE_MDIOCTR */
122#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */
123#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */
124
125/* AVE_MDIOSR */
126#define AVE_MDIOSR_STS BIT(0) /* access status */
127
128/* AVE_DESCC */
129#define AVE_DESCC_STATUS_MASK GENMASK(31, 16)
130#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */
131#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */
132#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */
133
134/* AVE_TXDC */
135#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */
136#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */
137#define AVE_TXDC_ADDR_START 0
138
139/* AVE_RXDC0 */
140#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */
141#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */
142#define AVE_RXDC0_ADDR_START 0
143
144/* AVE_IIRQC */
145#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */
146#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */
147
148/* Command status for descriptor */
149#define AVE_STS_OWN BIT(31) /* Descriptor ownership */
150#define AVE_STS_INTR BIT(29) /* Request for interrupt */
151#define AVE_STS_OK BIT(27) /* Normal transmit */
152/* TX */
153#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */
154#define AVE_STS_1ST BIT(26) /* Head of buffer chain */
155#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */
156#define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */
157#define AVE_STS_EC BIT(20) /* Excess collision occurred */
158#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0)
159/* RX */
160#define AVE_STS_CSSV BIT(21) /* Checksum check performed */
161#define AVE_STS_CSER BIT(20) /* Checksum error detected */
162#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0)
163
164/* Packet filter */
165#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0))
166#define AVE_PFMBYTE_MASK1 GENMASK(25, 0)
167#define AVE_PFMBIT_MASK GENMASK(15, 0)
168
169#define AVE_PF_SIZE 17 /* Number of all packet filter */
170#define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */
171
172#define AVE_PFNUM_FILTER 0 /* No.0 */
173#define AVE_PFNUM_UNICAST 1 /* No.1 */
174#define AVE_PFNUM_BROADCAST 2 /* No.2 */
175#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */
176
177/* NETIF Message control */
178#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
179 NETIF_MSG_PROBE | \
180 NETIF_MSG_LINK | \
181 NETIF_MSG_TIMER | \
182 NETIF_MSG_IFDOWN | \
183 NETIF_MSG_IFUP | \
184 NETIF_MSG_RX_ERR | \
185 NETIF_MSG_TX_ERR)
186
187/* Parameter for descriptor */
09ee3b4a
KH
188#define AVE_NR_TXDESC 64 /* Tx descriptor */
189#define AVE_NR_RXDESC 256 /* Rx descriptor */
4c270b55
KH
190
191#define AVE_DESC_OFS_CMDSTS 0
192#define AVE_DESC_OFS_ADDRL 4
193#define AVE_DESC_OFS_ADDRU 8
194
195/* Parameter for ethernet frame */
196#define AVE_MAX_ETHFRAME 1518
88113957 197#define AVE_FRAME_HEADROOM 2
4c270b55
KH
198
199/* Parameter for interrupt */
200#define AVE_INTM_COUNT 20
201#define AVE_FORCE_TXINTCNT 1
202
57878f2f
KH
203/* SG */
204#define SG_ETPINMODE 0x540
205#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */
206#define SG_ETPINMODE_RMII(ins) BIT(ins)
207
4c270b55
KH
208#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit)
209
6b9227d6
KH
210#define AVE_MAX_CLKS 4
211#define AVE_MAX_RSTS 2
212
4c270b55
KH
/* Selects which descriptor ring (Rx or Tx) an accessor operates on. */
enum desc_id {
	AVE_DESCID_RX,
	AVE_DESCID_TX,
};
217
/* Requested descriptor-engine state for ave_desc_switch(). */
enum desc_state {
	AVE_DESC_RX_PERMIT,
	AVE_DESC_RX_SUSPEND,
	AVE_DESC_START,
	AVE_DESC_STOP,
};
224
225struct ave_desc {
226 struct sk_buff *skbs;
227 dma_addr_t skbs_dma;
228 size_t skbs_dmalen;
229};
230
231struct ave_desc_info {
232 u32 ndesc; /* number of descriptor */
233 u32 daddr; /* start address of descriptor */
234 u32 proc_idx; /* index of processing packet */
235 u32 done_idx; /* index of processed packet */
236 struct ave_desc *desc; /* skb info related descriptor */
237};
238
4c270b55
KH
239struct ave_stats {
240 struct u64_stats_sync syncp;
241 u64 packets;
242 u64 bytes;
243 u64 errors;
244 u64 dropped;
245 u64 collisions;
246 u64 fifo_errors;
247};
248
249struct ave_private {
250 void __iomem *base;
251 int irq;
252 int phy_id;
253 unsigned int desc_size;
254 u32 msg_enable;
6b9227d6
KH
255 int nclks;
256 struct clk *clk[AVE_MAX_CLKS];
257 int nrsts;
258 struct reset_control *rst[AVE_MAX_RSTS];
4c270b55
KH
259 phy_interface_t phy_mode;
260 struct phy_device *phydev;
261 struct mii_bus *mdio;
57878f2f
KH
262 struct regmap *regmap;
263 unsigned int pinmode_mask;
264 unsigned int pinmode_val;
8d1283b1 265 u32 wolopts;
4c270b55
KH
266
267 /* stats */
268 struct ave_stats stats_rx;
269 struct ave_stats stats_tx;
270
271 /* NAPI support */
272 struct net_device *ndev;
273 struct napi_struct napi_rx;
274 struct napi_struct napi_tx;
275
276 /* descriptor */
277 struct ave_desc_info rx;
278 struct ave_desc_info tx;
279
280 /* flow control */
281 int pause_auto;
282 int pause_rx;
283 int pause_tx;
284
285 const struct ave_soc_data *data;
286};
287
57878f2f
KH
288struct ave_soc_data {
289 bool is_desc_64bit;
290 const char *clock_names[AVE_MAX_CLKS];
291 const char *reset_names[AVE_MAX_RSTS];
292 int (*get_pinmode)(struct ave_private *priv,
293 phy_interface_t phy_mode, u32 arg);
294};
295
4c270b55
KH
296static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
297 int offset)
298{
299 struct ave_private *priv = netdev_priv(ndev);
300 u32 addr;
301
302 addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
303 + entry * priv->desc_size + offset;
304
305 return readl(priv->base + addr);
306}
307
308static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
309 int entry)
310{
311 return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
312}
313
314static void ave_desc_write(struct net_device *ndev, enum desc_id id,
315 int entry, int offset, u32 val)
316{
317 struct ave_private *priv = netdev_priv(ndev);
318 u32 addr;
319
320 addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
321 + entry * priv->desc_size + offset;
322
323 writel(val, priv->base + addr);
324}
325
326static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
327 int entry, u32 val)
328{
329 ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
330}
331
332static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
333 int entry, dma_addr_t paddr)
334{
335 struct ave_private *priv = netdev_priv(ndev);
336
337 ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
338 lower_32_bits(paddr));
339 if (IS_DESC_64BIT(priv))
340 ave_desc_write(ndev, id,
341 entry, AVE_DESC_OFS_ADDRU,
342 upper_32_bits(paddr));
343}
344
345static u32 ave_irq_disable_all(struct net_device *ndev)
346{
347 struct ave_private *priv = netdev_priv(ndev);
348 u32 ret;
349
350 ret = readl(priv->base + AVE_GIMR);
351 writel(0, priv->base + AVE_GIMR);
352
353 return ret;
354}
355
356static void ave_irq_restore(struct net_device *ndev, u32 val)
357{
358 struct ave_private *priv = netdev_priv(ndev);
359
360 writel(val, priv->base + AVE_GIMR);
361}
362
363static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
364{
365 struct ave_private *priv = netdev_priv(ndev);
366
367 writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
368 writel(bitflag, priv->base + AVE_GISR);
369}
370
371static void ave_hw_write_macaddr(struct net_device *ndev,
372 const unsigned char *mac_addr,
373 int reg1, int reg2)
374{
375 struct ave_private *priv = netdev_priv(ndev);
376
377 writel(mac_addr[0] | mac_addr[1] << 8 |
378 mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
379 writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
380}
381
382static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
383{
384 struct ave_private *priv = netdev_priv(ndev);
385 u32 major, minor, vr;
386
387 vr = readl(priv->base + AVE_VR);
388 major = (vr & GENMASK(15, 8)) >> 8;
389 minor = (vr & GENMASK(7, 0));
390 snprintf(buf, len, "v%u.%u", major, minor);
391}
392
393static void ave_ethtool_get_drvinfo(struct net_device *ndev,
394 struct ethtool_drvinfo *info)
395{
396 struct device *dev = ndev->dev.parent;
397
f029c781
WS
398 strscpy(info->driver, dev->driver->name, sizeof(info->driver));
399 strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
4c270b55
KH
400 ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
401}
402
403static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
404{
405 struct ave_private *priv = netdev_priv(ndev);
406
407 return priv->msg_enable;
408}
409
410static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
411{
412 struct ave_private *priv = netdev_priv(ndev);
413
414 priv->msg_enable = val;
415}
416
417static void ave_ethtool_get_wol(struct net_device *ndev,
418 struct ethtool_wolinfo *wol)
419{
420 wol->supported = 0;
421 wol->wolopts = 0;
422
423 if (ndev->phydev)
424 phy_ethtool_get_wol(ndev->phydev, wol);
425}
426
82d5d6a6
KH
427static int __ave_ethtool_set_wol(struct net_device *ndev,
428 struct ethtool_wolinfo *wol)
4c270b55 429{
4c270b55
KH
430 if (!ndev->phydev ||
431 (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
432 return -EOPNOTSUPP;
433
82d5d6a6
KH
434 return phy_ethtool_set_wol(ndev->phydev, wol);
435}
436
437static int ave_ethtool_set_wol(struct net_device *ndev,
438 struct ethtool_wolinfo *wol)
439{
440 int ret;
441
442 ret = __ave_ethtool_set_wol(ndev, wol);
4c270b55
KH
443 if (!ret)
444 device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
445
446 return ret;
447}
448
449static void ave_ethtool_get_pauseparam(struct net_device *ndev,
450 struct ethtool_pauseparam *pause)
451{
452 struct ave_private *priv = netdev_priv(ndev);
453
454 pause->autoneg = priv->pause_auto;
455 pause->rx_pause = priv->pause_rx;
456 pause->tx_pause = priv->pause_tx;
457}
458
459static int ave_ethtool_set_pauseparam(struct net_device *ndev,
460 struct ethtool_pauseparam *pause)
461{
462 struct ave_private *priv = netdev_priv(ndev);
463 struct phy_device *phydev = ndev->phydev;
464
465 if (!phydev)
466 return -EINVAL;
467
468 priv->pause_auto = pause->autoneg;
469 priv->pause_rx = pause->rx_pause;
470 priv->pause_tx = pause->tx_pause;
471
70814e81 472 phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
4c270b55
KH
473
474 return 0;
475}
476
477static const struct ethtool_ops ave_ethtool_ops = {
478 .get_link_ksettings = phy_ethtool_get_link_ksettings,
479 .set_link_ksettings = phy_ethtool_set_link_ksettings,
480 .get_drvinfo = ave_ethtool_get_drvinfo,
481 .nway_reset = phy_ethtool_nway_reset,
482 .get_link = ethtool_op_get_link,
483 .get_msglevel = ave_ethtool_get_msglevel,
484 .set_msglevel = ave_ethtool_set_msglevel,
485 .get_wol = ave_ethtool_get_wol,
486 .set_wol = ave_ethtool_set_wol,
487 .get_pauseparam = ave_ethtool_get_pauseparam,
488 .set_pauseparam = ave_ethtool_set_pauseparam,
489};
490
491static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
492{
493 struct net_device *ndev = bus->priv;
494 struct ave_private *priv;
495 u32 mdioctl, mdiosr;
496 int ret;
497
498 priv = netdev_priv(ndev);
499
500 /* write address */
501 writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
502
503 /* read request */
504 mdioctl = readl(priv->base + AVE_MDIOCTR);
505 writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
506 priv->base + AVE_MDIOCTR);
507
508 ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
509 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
510 if (ret) {
511 netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
512 phyid, regnum);
513 return ret;
514 }
515
516 return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
517}
518
519static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
520 u16 val)
521{
522 struct net_device *ndev = bus->priv;
523 struct ave_private *priv;
524 u32 mdioctl, mdiosr;
525 int ret;
526
527 priv = netdev_priv(ndev);
528
529 /* write address */
530 writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
531
532 /* write data */
533 writel(val, priv->base + AVE_MDIOWDR);
534
535 /* write request */
536 mdioctl = readl(priv->base + AVE_MDIOCTR);
537 writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
538 priv->base + AVE_MDIOCTR);
539
540 ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
541 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
542 if (ret)
543 netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
544 phyid, regnum);
545
546 return ret;
547}
548
549static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
550 void *ptr, size_t len, enum dma_data_direction dir,
551 dma_addr_t *paddr)
552{
553 dma_addr_t map_addr;
554
555 map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
556 if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
557 return -ENOMEM;
558
559 desc->skbs_dma = map_addr;
560 desc->skbs_dmalen = len;
561 *paddr = map_addr;
562
563 return 0;
564}
565
566static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
567 enum dma_data_direction dir)
568{
569 if (!desc->skbs_dma)
570 return;
571
572 dma_unmap_single(ndev->dev.parent,
573 desc->skbs_dma, desc->skbs_dmalen, dir);
574 desc->skbs_dma = 0;
575}
576
577/* Prepare Rx descriptor and memory */
578static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
579{
580 struct ave_private *priv = netdev_priv(ndev);
581 struct sk_buff *skb;
582 dma_addr_t paddr;
583 int ret;
584
585 skb = priv->rx.desc[entry].skbs;
586 if (!skb) {
88113957 587 skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
4c270b55
KH
588 if (!skb) {
589 netdev_err(ndev, "can't allocate skb for Rx\n");
590 return -ENOMEM;
591 }
88113957
KH
592 skb->data += AVE_FRAME_HEADROOM;
593 skb->tail += AVE_FRAME_HEADROOM;
4c270b55
KH
594 }
595
596 /* set disable to cmdsts */
597 ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
598 AVE_STS_INTR | AVE_STS_OWN);
599
600 /* map Rx buffer
601 * Rx buffer set to the Rx descriptor has two restrictions:
602 * - Rx buffer address is 4 byte aligned.
603 * - Rx buffer begins with 2 byte headroom, and data will be put from
604 * (buffer + 2).
605 * To satisfy this, specify the address to put back the buffer
88113957
KH
606 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
607 * by AVE_FRAME_HEADROOM.
4c270b55
KH
608 */
609 ret = ave_dma_map(ndev, &priv->rx.desc[entry],
88113957
KH
610 skb->data - AVE_FRAME_HEADROOM,
611 AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
4c270b55
KH
612 DMA_FROM_DEVICE, &paddr);
613 if (ret) {
614 netdev_err(ndev, "can't map skb for Rx\n");
615 dev_kfree_skb_any(skb);
616 return ret;
617 }
618 priv->rx.desc[entry].skbs = skb;
619
620 /* set buffer pointer */
621 ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
622
623 /* set enable to cmdsts */
624 ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
625 AVE_STS_INTR | AVE_MAX_ETHFRAME);
626
627 return ret;
628}
629
630/* Switch state of descriptor */
631static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
632{
633 struct ave_private *priv = netdev_priv(ndev);
634 int ret = 0;
635 u32 val;
636
637 switch (state) {
638 case AVE_DESC_START:
639 writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
640 break;
641
642 case AVE_DESC_STOP:
643 writel(0, priv->base + AVE_DESCC);
644 if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
645 150, 15000)) {
646 netdev_err(ndev, "can't stop descriptor\n");
647 ret = -EBUSY;
648 }
649 break;
650
651 case AVE_DESC_RX_SUSPEND:
652 val = readl(priv->base + AVE_DESCC);
653 val |= AVE_DESCC_RDSTP;
654 val &= ~AVE_DESCC_STATUS_MASK;
655 writel(val, priv->base + AVE_DESCC);
656 if (readl_poll_timeout(priv->base + AVE_DESCC, val,
657 val & (AVE_DESCC_RDSTP << 16),
658 150, 150000)) {
659 netdev_err(ndev, "can't suspend descriptor\n");
660 ret = -EBUSY;
661 }
662 break;
663
664 case AVE_DESC_RX_PERMIT:
665 val = readl(priv->base + AVE_DESCC);
666 val &= ~AVE_DESCC_RDSTP;
667 val &= ~AVE_DESCC_STATUS_MASK;
668 writel(val, priv->base + AVE_DESCC);
669 break;
670
671 default:
672 ret = -EINVAL;
673 break;
674 }
675
676 return ret;
677}
678
679static int ave_tx_complete(struct net_device *ndev)
680{
681 struct ave_private *priv = netdev_priv(ndev);
682 u32 proc_idx, done_idx, ndesc, cmdsts;
683 unsigned int nr_freebuf = 0;
684 unsigned int tx_packets = 0;
685 unsigned int tx_bytes = 0;
686
687 proc_idx = priv->tx.proc_idx;
688 done_idx = priv->tx.done_idx;
689 ndesc = priv->tx.ndesc;
690
691 /* free pre-stored skb from done_idx to proc_idx */
692 while (proc_idx != done_idx) {
693 cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
694
695 /* do nothing if owner is HW (==1 for Tx) */
696 if (cmdsts & AVE_STS_OWN)
697 break;
698
699 /* check Tx status and updates statistics */
700 if (cmdsts & AVE_STS_OK) {
701 tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
702 /* success */
703 if (cmdsts & AVE_STS_LAST)
704 tx_packets++;
705 } else {
706 /* error */
707 if (cmdsts & AVE_STS_LAST) {
708 priv->stats_tx.errors++;
709 if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
710 priv->stats_tx.collisions++;
711 }
712 }
713
714 /* release skb */
715 if (priv->tx.desc[done_idx].skbs) {
716 ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
717 DMA_TO_DEVICE);
718 dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
719 priv->tx.desc[done_idx].skbs = NULL;
720 nr_freebuf++;
721 }
722 done_idx = (done_idx + 1) % ndesc;
723 }
724
725 priv->tx.done_idx = done_idx;
726
727 /* update stats */
728 u64_stats_update_begin(&priv->stats_tx.syncp);
729 priv->stats_tx.packets += tx_packets;
730 priv->stats_tx.bytes += tx_bytes;
731 u64_stats_update_end(&priv->stats_tx.syncp);
732
733 /* wake queue for freeing buffer */
734 if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
735 netif_wake_queue(ndev);
736
737 return nr_freebuf;
738}
739
740static int ave_rx_receive(struct net_device *ndev, int num)
741{
742 struct ave_private *priv = netdev_priv(ndev);
743 unsigned int rx_packets = 0;
744 unsigned int rx_bytes = 0;
745 u32 proc_idx, done_idx;
746 struct sk_buff *skb;
747 unsigned int pktlen;
748 int restpkt, npkts;
749 u32 ndesc, cmdsts;
750
751 proc_idx = priv->rx.proc_idx;
752 done_idx = priv->rx.done_idx;
753 ndesc = priv->rx.ndesc;
754 restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
755
756 for (npkts = 0; npkts < num; npkts++) {
757 /* we can't receive more packet, so fill desc quickly */
758 if (--restpkt < 0)
759 break;
760
761 cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
762
763 /* do nothing if owner is HW (==0 for Rx) */
764 if (!(cmdsts & AVE_STS_OWN))
765 break;
766
767 if (!(cmdsts & AVE_STS_OK)) {
768 priv->stats_rx.errors++;
769 proc_idx = (proc_idx + 1) % ndesc;
770 continue;
771 }
772
773 pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
774
775 /* get skbuff for rx */
776 skb = priv->rx.desc[proc_idx].skbs;
777 priv->rx.desc[proc_idx].skbs = NULL;
778
779 ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
780
781 skb->dev = ndev;
782 skb_put(skb, pktlen);
783 skb->protocol = eth_type_trans(skb, ndev);
784
785 if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
786 skb->ip_summed = CHECKSUM_UNNECESSARY;
787
788 rx_packets++;
789 rx_bytes += pktlen;
790
791 netif_receive_skb(skb);
792
793 proc_idx = (proc_idx + 1) % ndesc;
794 }
795
796 priv->rx.proc_idx = proc_idx;
797
798 /* update stats */
799 u64_stats_update_begin(&priv->stats_rx.syncp);
800 priv->stats_rx.packets += rx_packets;
801 priv->stats_rx.bytes += rx_bytes;
802 u64_stats_update_end(&priv->stats_rx.syncp);
803
804 /* refill the Rx buffers */
805 while (proc_idx != done_idx) {
806 if (ave_rxdesc_prepare(ndev, done_idx))
807 break;
808 done_idx = (done_idx + 1) % ndesc;
809 }
810
811 priv->rx.done_idx = done_idx;
812
813 return npkts;
814}
815
816static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
817{
818 struct ave_private *priv;
819 struct net_device *ndev;
820 int num;
821
822 priv = container_of(napi, struct ave_private, napi_rx);
823 ndev = priv->ndev;
824
825 num = ave_rx_receive(ndev, budget);
826 if (num < budget) {
827 napi_complete_done(napi, num);
828
829 /* enable Rx interrupt when NAPI finishes */
830 ave_irq_enable(ndev, AVE_GI_RXIINT);
831 }
832
833 return num;
834}
835
836static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
837{
838 struct ave_private *priv;
839 struct net_device *ndev;
840 int num;
841
842 priv = container_of(napi, struct ave_private, napi_tx);
843 ndev = priv->ndev;
844
845 num = ave_tx_complete(ndev);
846 napi_complete(napi);
847
848 /* enable Tx interrupt when NAPI finishes */
849 ave_irq_enable(ndev, AVE_GI_TX);
850
851 return num;
852}
853
854static void ave_global_reset(struct net_device *ndev)
855{
856 struct ave_private *priv = netdev_priv(ndev);
857 u32 val;
858
859 /* set config register */
860 val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
861 if (!phy_interface_mode_is_rgmii(priv->phy_mode))
862 val |= AVE_CFGR_MII;
863 writel(val, priv->base + AVE_CFGR);
864
865 /* reset RMII register */
866 val = readl(priv->base + AVE_RSTCTRL);
867 val &= ~AVE_RSTCTRL_RMIIRST;
868 writel(val, priv->base + AVE_RSTCTRL);
869
870 /* assert reset */
871 writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
872 msleep(20);
873
874 /* 1st, negate PHY reset only */
875 writel(AVE_GRR_GRST, priv->base + AVE_GRR);
876 msleep(40);
877
878 /* negate reset */
879 writel(0, priv->base + AVE_GRR);
880 msleep(40);
881
882 /* negate RMII register */
883 val = readl(priv->base + AVE_RSTCTRL);
884 val |= AVE_RSTCTRL_RMIIRST;
885 writel(val, priv->base + AVE_RSTCTRL);
886
887 ave_irq_disable_all(ndev);
888}
889
890static void ave_rxfifo_reset(struct net_device *ndev)
891{
892 struct ave_private *priv = netdev_priv(ndev);
893 u32 rxcr_org;
894
895 /* save and disable MAC receive op */
896 rxcr_org = readl(priv->base + AVE_RXCR);
897 writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
898
899 /* suspend Rx descriptor */
900 ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
901
902 /* receive all packets before descriptor starts */
903 ave_rx_receive(ndev, priv->rx.ndesc);
904
905 /* assert reset */
906 writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
0020f5c8 907 udelay(50);
4c270b55
KH
908
909 /* negate reset */
910 writel(0, priv->base + AVE_GRR);
0020f5c8 911 udelay(20);
4c270b55
KH
912
913 /* negate interrupt status */
914 writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
915
916 /* permit descriptor */
917 ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
918
919 /* restore MAC reccieve op */
920 writel(rxcr_org, priv->base + AVE_RXCR);
921}
922
923static irqreturn_t ave_irq_handler(int irq, void *netdev)
924{
925 struct net_device *ndev = (struct net_device *)netdev;
926 struct ave_private *priv = netdev_priv(ndev);
927 u32 gimr_val, gisr_val;
928
929 gimr_val = ave_irq_disable_all(ndev);
930
931 /* get interrupt status */
932 gisr_val = readl(priv->base + AVE_GISR);
933
934 /* PHY */
935 if (gisr_val & AVE_GI_PHY)
936 writel(AVE_GI_PHY, priv->base + AVE_GISR);
937
938 /* check exceeding packet */
939 if (gisr_val & AVE_GI_RXERR) {
940 writel(AVE_GI_RXERR, priv->base + AVE_GISR);
941 netdev_err(ndev, "receive a packet exceeding frame buffer\n");
942 }
943
944 gisr_val &= gimr_val;
945 if (!gisr_val)
946 goto exit_isr;
947
948 /* RxFIFO overflow */
949 if (gisr_val & AVE_GI_RXOVF) {
950 priv->stats_rx.fifo_errors++;
951 ave_rxfifo_reset(ndev);
952 goto exit_isr;
953 }
954
955 /* Rx drop */
956 if (gisr_val & AVE_GI_RXDROP) {
957 priv->stats_rx.dropped++;
958 writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
959 }
960
961 /* Rx interval */
962 if (gisr_val & AVE_GI_RXIINT) {
963 napi_schedule(&priv->napi_rx);
964 /* still force to disable Rx interrupt until NAPI finishes */
965 gimr_val &= ~AVE_GI_RXIINT;
966 }
967
968 /* Tx completed */
969 if (gisr_val & AVE_GI_TX) {
970 napi_schedule(&priv->napi_tx);
971 /* still force to disable Tx interrupt until NAPI finishes */
972 gimr_val &= ~AVE_GI_TX;
973 }
974
975exit_isr:
976 ave_irq_restore(ndev, gimr_val);
977
978 return IRQ_HANDLED;
979}
980
981static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
982{
983 struct ave_private *priv = netdev_priv(ndev);
984 u32 val;
985
986 if (WARN_ON(entry > AVE_PF_SIZE))
987 return -EINVAL;
988
989 val = readl(priv->base + AVE_PFEN);
990 writel(val | BIT(entry), priv->base + AVE_PFEN);
991
992 return 0;
993}
994
995static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
996{
997 struct ave_private *priv = netdev_priv(ndev);
998 u32 val;
999
1000 if (WARN_ON(entry > AVE_PF_SIZE))
1001 return -EINVAL;
1002
1003 val = readl(priv->base + AVE_PFEN);
1004 writel(val & ~BIT(entry), priv->base + AVE_PFEN);
1005
1006 return 0;
1007}
1008
1009static int ave_pfsel_set_macaddr(struct net_device *ndev,
1010 unsigned int entry,
1011 const unsigned char *mac_addr,
1012 unsigned int set_size)
1013{
1014 struct ave_private *priv = netdev_priv(ndev);
1015
1016 if (WARN_ON(entry > AVE_PF_SIZE))
1017 return -EINVAL;
1018 if (WARN_ON(set_size > 6))
1019 return -EINVAL;
1020
1021 ave_pfsel_stop(ndev, entry);
1022
1023 /* set MAC address for the filter */
1024 ave_hw_write_macaddr(ndev, mac_addr,
1025 AVE_PKTF(entry), AVE_PKTF(entry) + 4);
1026
1027 /* set byte mask */
1028 writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
1029 priv->base + AVE_PFMBYTE(entry));
1030 writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1031
1032 /* set bit mask filter */
1033 writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1034
1035 /* set selector to ring 0 */
1036 writel(0, priv->base + AVE_PFSEL(entry));
1037
1038 /* restart filter */
1039 ave_pfsel_start(ndev, entry);
1040
1041 return 0;
1042}
1043
1044static void ave_pfsel_set_promisc(struct net_device *ndev,
1045 unsigned int entry, u32 rxring)
1046{
1047 struct ave_private *priv = netdev_priv(ndev);
1048
1049 if (WARN_ON(entry > AVE_PF_SIZE))
1050 return;
1051
1052 ave_pfsel_stop(ndev, entry);
1053
1054 /* set byte mask */
1055 writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
1056 writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1057
1058 /* set bit mask filter */
1059 writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1060
1061 /* set selector to rxring */
1062 writel(rxring, priv->base + AVE_PFSEL(entry));
1063
1064 ave_pfsel_start(ndev, entry);
1065}
1066
1067static void ave_pfsel_init(struct net_device *ndev)
1068{
1069 unsigned char bcast_mac[ETH_ALEN];
1070 int i;
1071
1072 eth_broadcast_addr(bcast_mac);
1073
1074 for (i = 0; i < AVE_PF_SIZE; i++)
1075 ave_pfsel_stop(ndev, i);
1076
1077 /* promiscious entry, select ring 0 */
1078 ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1079
1080 /* unicast entry */
1081 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1082
1083 /* broadcast entry */
1084 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1085}
1086
/* phylib adjust_link callback: push the negotiated speed, duplex and
 * flow-control state from the PHY into the AVE MAC registers, and print
 * the link status change.
 */
static void ave_phy_adjust_link(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u32 val, txcr, rxcr, rxcr_org;
	u16 rmt_adv = 0, lcl_adv = 0;
	u8 cap;

	/* set RGMII speed */
	val = readl(priv->base + AVE_TXCR);
	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);

	/* 1G bit is set only on RGMII; both bits clear means 10M */
	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
		val |= AVE_TXCR_TXSPD_1G;
	else if (phydev->speed == SPEED_100)
		val |= AVE_TXCR_TXSPD_100;

	writel(val, priv->base + AVE_TXCR);

	/* set RMII speed (100M/10M only) */
	if (!phy_interface_is_rgmii(phydev)) {
		val = readl(priv->base + AVE_LINKSEL);
		if (phydev->speed == SPEED_10)
			val &= ~AVE_LINKSEL_100M;
		else
			val |= AVE_LINKSEL_100M;
		writel(val, priv->base + AVE_LINKSEL);
	}

	/* check current RXCR/TXCR */
	rxcr = readl(priv->base + AVE_RXCR);
	txcr = readl(priv->base + AVE_TXCR);
	rxcr_org = rxcr;

	if (phydev->duplex) {
		rxcr |= AVE_RXCR_FDUPEN;

		/* resolve pause-frame usage from local and link-partner
		 * advertisements (full duplex only)
		 */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (cap & FLOW_CTRL_TX)
			txcr |= AVE_TXCR_FLOCTR;
		else
			txcr &= ~AVE_TXCR_FLOCTR;
		if (cap & FLOW_CTRL_RX)
			rxcr |= AVE_RXCR_FLOCTR;
		else
			rxcr &= ~AVE_RXCR_FLOCTR;
	} else {
		/* half duplex: no flow control in either direction */
		rxcr &= ~AVE_RXCR_FDUPEN;
		rxcr &= ~AVE_RXCR_FLOCTR;
		txcr &= ~AVE_TXCR_FLOCTR;
	}

	if (rxcr_org != rxcr) {
		/* disable Rx mac */
		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
		/* change and enable TX/Rx mac */
		writel(txcr, priv->base + AVE_TXCR);
		writel(rxcr, priv->base + AVE_RXCR);
	}

	phy_print_status(phydev);
}
1155
1156static void ave_macaddr_init(struct net_device *ndev)
1157{
1158 ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
1159
1160 /* pfsel unicast entry */
1161 ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1162}
1163
/* ndo_init: one-time setup at register_netdev() time.
 * Powers up clocks and deasserts resets, applies the SoC-specific
 * pinmode via the syscon regmap, resets the MAC, registers the MDIO bus
 * and attaches the PHY. On failure, resources are unwound in reverse
 * order via the goto chain.
 */
static int ave_init(struct net_device *ndev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ave_private *priv = netdev_priv(ndev);
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	struct device_node *mdio_np;
	struct phy_device *phydev;
	int nc, nr, ret;

	/* enable clk because of hw access until ndo_open */
	for (nc = 0; nc < priv->nclks; nc++) {
		ret = clk_prepare_enable(priv->clk[nc]);
		if (ret) {
			dev_err(dev, "can't enable clock\n");
			goto out_clk_disable;
		}
	}

	for (nr = 0; nr < priv->nrsts; nr++) {
		ret = reset_control_deassert(priv->rst[nr]);
		if (ret) {
			dev_err(dev, "can't deassert reset\n");
			goto out_reset_assert;
		}
	}

	/* route the MAC pins according to the phy-mode (set up in probe) */
	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
				 priv->pinmode_mask, priv->pinmode_val);
	if (ret)
		goto out_reset_assert;

	ave_global_reset(ndev);

	mdio_np = of_get_child_by_name(np, "mdio");
	if (!mdio_np) {
		dev_err(dev, "mdio node not found\n");
		ret = -EINVAL;
		goto out_reset_assert;
	}
	ret = of_mdiobus_register(priv->mdio, mdio_np);
	of_node_put(mdio_np);
	if (ret) {
		dev_err(dev, "failed to register mdiobus\n");
		goto out_reset_assert;
	}

	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
	if (!phydev) {
		dev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_mdio_unregister;
	}

	priv->phydev = phydev;

	/* wakeup capability follows whatever WoL modes the PHY supports */
	ave_ethtool_get_wol(ndev, &wol);
	device_set_wakeup_capable(&ndev->dev, !!wol.supported);

	/* set wol initial state disabled */
	wol.wolopts = 0;
	__ave_ethtool_set_wol(ndev, &wol);

	/* non-RGMII interfaces (MII/RMII) cannot do gigabit */
	if (!phy_interface_is_rgmii(phydev))
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	phy_attached_info(phydev);

	return 0;

out_mdio_unregister:
	mdiobus_unregister(priv->mdio);
out_reset_assert:
	/* nr/nc hold the count of successfully acquired resources */
	while (--nr >= 0)
		reset_control_assert(priv->rst[nr]);
out_clk_disable:
	while (--nc >= 0)
		clk_disable_unprepare(priv->clk[nc]);

	return ret;
}
1247
1248static void ave_uninit(struct net_device *ndev)
1249{
1250 struct ave_private *priv = netdev_priv(ndev);
6b9227d6 1251 int i;
4c270b55
KH
1252
1253 phy_disconnect(priv->phydev);
1254 mdiobus_unregister(priv->mdio);
1255
1256 /* disable clk because of hw access after ndo_stop */
6b9227d6
KH
1257 for (i = 0; i < priv->nrsts; i++)
1258 reset_control_assert(priv->rst[i]);
1259 for (i = 0; i < priv->nclks; i++)
1260 clk_disable_unprepare(priv->clk[i]);
4c270b55
KH
1261}
1262
/* ndo_open: allocate and arm the Tx/Rx descriptor rings, program MAC
 * filters and Rx/Tx configuration, enable interrupts and NAPI, then
 * start the PHY and the Tx queue.
 */
static int ave_open(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;
	int ret;
	u32 val;

	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
			  ndev);
	if (ret)
		return ret;

	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
				GFP_KERNEL);
	if (!priv->tx.desc) {
		ret = -ENOMEM;
		goto out_free_irq;
	}

	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
				GFP_KERNEL);
	if (!priv->rx.desc) {
		kfree(priv->tx.desc);
		ret = -ENOMEM;
		goto out_free_irq;
	}

	/* initialize Tx work and descriptor */
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
	}
	/* ring byte size goes in the upper half of the TXDC register */
	writel(AVE_TXDC_ADDR_START |
	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
	       priv->base + AVE_TXDC);

	/* initialize Rx work and descriptor */
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (ave_rxdesc_prepare(ndev, entry))
			break;
	}
	writel(AVE_RXDC0_ADDR_START |
	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
	       priv->base + AVE_RXDC0);

	ave_desc_switch(ndev, AVE_DESC_START);

	ave_pfsel_init(ndev);
	ave_macaddr_init(ndev);

	/* set Rx configuration */
	/* full duplex, enable pause drop, enable flow control */
	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
	      AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
	writel(val, priv->base + AVE_RXCR);

	/* set Tx configuration */
	/* enable flow control, disable loopback */
	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);

	/* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
	writel(val, priv->base + AVE_IIRQC);

	/* unmask the interrupt sources the driver handles */
	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
	ave_irq_restore(ndev, val);

	napi_enable(&priv->napi_rx);
	napi_enable(&priv->napi_tx);

	phy_start(ndev->phydev);
	phy_start_aneg(ndev->phydev);
	netif_start_queue(ndev);

	return 0;

out_free_irq:
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	return ret;
}
1350
/* ndo_stop: quiesce interrupts, PHY, NAPI and the descriptor engine,
 * then release every skb still held by the Tx/Rx rings and free the
 * ring bookkeeping arrays allocated in ave_open().
 */
static int ave_stop(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	int entry;

	ave_irq_disable_all(ndev);
	disable_irq(priv->irq);
	free_irq(priv->irq, ndev);

	netif_tx_disable(ndev);
	phy_stop(ndev->phydev);
	napi_disable(&priv->napi_tx);
	napi_disable(&priv->napi_rx);

	ave_desc_switch(ndev, AVE_DESC_STOP);

	/* free Tx buffer */
	for (entry = 0; entry < priv->tx.ndesc; entry++) {
		if (!priv->tx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
		priv->tx.desc[entry].skbs = NULL;
	}
	priv->tx.proc_idx = 0;
	priv->tx.done_idx = 0;

	/* free Rx buffer */
	for (entry = 0; entry < priv->rx.ndesc; entry++) {
		if (!priv->rx.desc[entry].skbs)
			continue;

		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
		priv->rx.desc[entry].skbs = NULL;
	}
	priv->rx.proc_idx = 0;
	priv->rx.done_idx = 0;

	kfree(priv->tx.desc);
	kfree(priv->rx.desc);

	return 0;
}
1396
d54fc481 1397static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
4c270b55
KH
1398{
1399 struct ave_private *priv = netdev_priv(ndev);
1400 u32 proc_idx, done_idx, ndesc, cmdsts;
1401 int ret, freepkt;
1402 dma_addr_t paddr;
1403
1404 proc_idx = priv->tx.proc_idx;
1405 done_idx = priv->tx.done_idx;
1406 ndesc = priv->tx.ndesc;
1407 freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
1408
1409 /* stop queue when not enough entry */
1410 if (unlikely(freepkt < 1)) {
1411 netif_stop_queue(ndev);
1412 return NETDEV_TX_BUSY;
1413 }
1414
1415 /* add padding for short packet */
1416 if (skb_put_padto(skb, ETH_ZLEN)) {
1417 priv->stats_tx.dropped++;
1418 return NETDEV_TX_OK;
1419 }
1420
1421 /* map Tx buffer
1422 * Tx buffer set to the Tx descriptor doesn't have any restriction.
1423 */
1424 ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
1425 skb->data, skb->len, DMA_TO_DEVICE, &paddr);
1426 if (ret) {
1427 dev_kfree_skb_any(skb);
1428 priv->stats_tx.dropped++;
1429 return NETDEV_TX_OK;
1430 }
1431
1432 priv->tx.desc[proc_idx].skbs = skb;
1433
1434 ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
1435
1436 cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
1437 (skb->len & AVE_STS_PKTLEN_TX_MASK);
1438
1439 /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
1440 if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
1441 cmdsts |= AVE_STS_INTR;
1442
1443 /* disable checksum calculation when skb doesn't calurate checksum */
1444 if (skb->ip_summed == CHECKSUM_NONE ||
1445 skb->ip_summed == CHECKSUM_UNNECESSARY)
1446 cmdsts |= AVE_STS_NOCSUM;
1447
1448 ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
1449
1450 priv->tx.proc_idx = (proc_idx + 1) % ndesc;
1451
1452 return NETDEV_TX_OK;
1453}
1454
1455static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1456{
1457 return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1458}
1459
/* first-octet prefixes of IPv4 (01:xx) and IPv6 (33:xx) multicast MAC
 * addresses, used with a 1-byte filter match in ave_set_rx_mode()
 */
static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1462
/* ndo_set_rx_mode: program promiscuous and multicast filtering.
 * Up to AVE_PF_MULTICAST_SIZE exact multicast matches are supported;
 * beyond that (or with IFF_ALLMULTI) fall back to matching only the
 * first octet of the IPv4/IPv6 multicast prefix.
 */
static void ave_set_rx_mode(struct net_device *ndev)
{
	struct ave_private *priv = netdev_priv(ndev);
	struct netdev_hw_addr *hw_adr;
	int count, mc_cnt;
	u32 val;

	/* MAC addr filter enable for promiscuous mode */
	mc_cnt = netdev_mc_count(ndev);
	val = readl(priv->base + AVE_RXCR);
	if (ndev->flags & IFF_PROMISC || !mc_cnt)
		val &= ~AVE_RXCR_AFEN;
	else
		val |= AVE_RXCR_AFEN;
	writel(val, priv->base + AVE_RXCR);

	/* set all multicast address */
	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
		/* 1-byte match: accept any 01:* / 33:* destination */
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
				      v4multi_macadr, 1);
		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
				      v6multi_macadr, 1);
	} else {
		/* stop all multicast filter */
		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);

		/* set multicast addresses */
		count = 0;
		netdev_for_each_mc_addr(hw_adr, ndev) {
			if (count == mc_cnt)
				break;
			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
					      hw_adr->addr, 6);
			count++;
		}
	}
}
1501
/* ndo_get_stats64: snapshot software-maintained counters.
 * packets/bytes are read under the u64_stats seqcount for a consistent
 * pair; the remaining counters are read without synchronization.
 */
static void ave_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *stats)
{
	struct ave_private *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
		stats->rx_packets = priv->stats_rx.packets;
		stats->rx_bytes = priv->stats_rx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
		stats->tx_packets = priv->stats_tx.packets;
		stats->tx_bytes = priv->stats_tx.bytes;
	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));

	stats->rx_errors = priv->stats_rx.errors;
	stats->tx_errors = priv->stats_tx.errors;
	stats->rx_dropped = priv->stats_rx.dropped;
	stats->tx_dropped = priv->stats_tx.dropped;
	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
	stats->collisions = priv->stats_tx.collisions;
}
1527
/* ndo_set_mac_address: validate/store the new address, then reprogram
 * the MAC registers and the unicast filter entry.
 */
static int ave_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (err)
		return err;

	ave_macaddr_init(ndev);

	return 0;
}
1539
/* net_device operations implemented by this driver */
static const struct net_device_ops ave_netdev_ops = {
	.ndo_init = ave_init,
	.ndo_uninit = ave_uninit,
	.ndo_open = ave_open,
	.ndo_stop = ave_stop,
	.ndo_start_xmit = ave_start_xmit,
	.ndo_eth_ioctl = ave_ioctl,
	.ndo_set_rx_mode = ave_set_rx_mode,
	.ndo_get_stats64 = ave_get_stats64,
	.ndo_set_mac_address = ave_set_mac_address,
};
1551
/* Platform probe: parse DT resources (phy-mode, IRQ, registers, MAC
 * address, clocks, resets, syscon phy-mode handle), size the descriptor
 * format per SoC, set up the MDIO bus and NAPI contexts, and register
 * the net device. Most resources are devm-managed.
 */
static int ave_probe(struct platform_device *pdev)
{
	const struct ave_soc_data *data;
	struct device *dev = &pdev->dev;
	char buf[ETHTOOL_FWVERS_LEN];
	struct of_phandle_args args;
	phy_interface_t phy_mode;
	struct ave_private *priv;
	struct net_device *ndev;
	struct device_node *np;
	void __iomem *base;
	const char *name;
	int i, irq, ret;
	u64 dma_mask;
	u32 ave_id;

	data = of_device_get_match_data(dev);
	if (WARN_ON(!data))
		return -EINVAL;

	np = dev->of_node;
	ret = of_get_phy_mode(np, &phy_mode);
	if (ret) {
		dev_err(dev, "phy-mode not found\n");
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
	if (!ndev) {
		dev_err(dev, "can't allocate ethernet device\n");
		return -ENOMEM;
	}

	ndev->netdev_ops = &ave_netdev_ops;
	ndev->ethtool_ops = &ave_ethtool_ops;
	SET_NETDEV_DEV(ndev, dev);

	ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);

	/* max frame minus L2 header/FCS gives the MTU ceiling */
	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);

	ret = of_get_ethdev_address(np, ndev);
	if (ret) {
		/* if the mac address is invalid, use random mac address */
		eth_hw_addr_random(ndev);
		dev_warn(dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
	}

	priv = netdev_priv(ndev);
	priv->base = base;
	priv->irq = irq;
	priv->ndev = ndev;
	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
	priv->phy_mode = phy_mode;
	priv->data = data;

	/* descriptor layout (and hence the DMA mask) is SoC dependent */
	if (IS_DESC_64BIT(priv)) {
		priv->desc_size = AVE_DESC_SIZE_64;
		priv->tx.daddr = AVE_TXDM_64;
		priv->rx.daddr = AVE_RXDM_64;
		dma_mask = DMA_BIT_MASK(64);
	} else {
		priv->desc_size = AVE_DESC_SIZE_32;
		priv->tx.daddr = AVE_TXDM_32;
		priv->rx.daddr = AVE_RXDM_32;
		dma_mask = DMA_BIT_MASK(32);
	}
	ret = dma_set_mask(dev, dma_mask);
	if (ret)
		return ret;

	priv->tx.ndesc = AVE_NR_TXDESC;
	priv->rx.ndesc = AVE_NR_RXDESC;

	u64_stats_init(&priv->stats_tx.syncp);
	u64_stats_init(&priv->stats_rx.syncp);

	/* acquire the clocks/resets named in the per-SoC data tables */
	for (i = 0; i < AVE_MAX_CLKS; i++) {
		name = priv->data->clock_names[i];
		if (!name)
			break;
		priv->clk[i] = devm_clk_get(dev, name);
		if (IS_ERR(priv->clk[i]))
			return PTR_ERR(priv->clk[i]);
		priv->nclks++;
	}

	for (i = 0; i < AVE_MAX_RSTS; i++) {
		name = priv->data->reset_names[i];
		if (!name)
			break;
		priv->rst[i] = devm_reset_control_get_shared(dev, name);
		if (IS_ERR(priv->rst[i]))
			return PTR_ERR(priv->rst[i]);
		priv->nrsts++;
	}

	/* the phandle argument selects the MAC instance for pinmode setup */
	ret = of_parse_phandle_with_fixed_args(np,
					       "socionext,syscon-phy-mode",
					       1, 0, &args);
	if (ret) {
		dev_err(dev, "can't get syscon-phy-mode property\n");
		return ret;
	}
	priv->regmap = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(priv->regmap)) {
		dev_err(dev, "can't map syscon-phy-mode\n");
		return PTR_ERR(priv->regmap);
	}
	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
	if (ret) {
		dev_err(dev, "invalid phy-mode setting\n");
		return ret;
	}

	priv->mdio = devm_mdiobus_alloc(dev);
	if (!priv->mdio)
		return -ENOMEM;
	priv->mdio->priv = ndev;
	priv->mdio->parent = dev;
	priv->mdio->read = ave_mdiobus_read;
	priv->mdio->write = ave_mdiobus_write;
	priv->mdio->name = "uniphier-mdio";
	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register as a NAPI supported driver */
	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
	netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);

	platform_set_drvdata(pdev, ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "failed to register netdevice\n");
		goto out_del_napi;
	}

	/* get ID and version */
	ave_id = readl(priv->base + AVE_IDR);
	ave_hw_read_version(ndev, buf, sizeof(buf));

	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
		 buf, priv->irq, phy_modes(phy_mode));

	return 0;

out_del_napi:
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);

	return ret;
}
1718
/* Platform remove: unregister the netdev (which runs ->ndo_uninit) and
 * drop the NAPI contexts; remaining resources are devm-managed.
 */
static int ave_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ave_private *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi_rx);
	netif_napi_del(&priv->napi_tx);

	return 0;
}
1730
0ba78b4a
KH
1731#ifdef CONFIG_PM_SLEEP
/* System suspend: stop the interface if it is up and remember the
 * current WoL configuration so ave_resume() can restore it after the
 * MAC is reset.
 */
static int ave_suspend(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = ave_stop(ndev);
		netif_device_detach(ndev);
	}

	/* save WoL state; a reset on resume would otherwise lose it */
	ave_ethtool_get_wol(ndev, &wol);
	priv->wolopts = wol.wolopts;

	return ret;
}
1749
/* System resume: reset the MAC, restore the WoL configuration saved at
 * suspend, resume the PHY, and reopen the interface if it was running.
 */
static int ave_resume(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	ave_global_reset(ndev);

	/* re-apply the wolopts captured in ave_suspend() */
	ave_ethtool_get_wol(ndev, &wol);
	wol.wolopts = priv->wolopts;
	__ave_ethtool_set_wol(ndev, &wol);

	if (ndev->phydev) {
		ret = phy_resume(ndev->phydev);
		if (ret)
			return ret;
	}

	if (netif_running(ndev)) {
		ret = ave_open(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}
1776
/* PM callbacks are compiled out entirely without CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
#define AVE_PM_OPS	(&ave_pm_ops)
#else
#define AVE_PM_OPS	NULL
#endif
1782
57878f2f
KH
1783static int ave_pro4_get_pinmode(struct ave_private *priv,
1784 phy_interface_t phy_mode, u32 arg)
1785{
1786 if (arg > 0)
1787 return -EINVAL;
1788
1789 priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1790
1791 switch (phy_mode) {
1792 case PHY_INTERFACE_MODE_RMII:
1793 priv->pinmode_val = SG_ETPINMODE_RMII(0);
1794 break;
1795 case PHY_INTERFACE_MODE_MII:
1796 case PHY_INTERFACE_MODE_RGMII:
b9287f2a
KH
1797 case PHY_INTERFACE_MODE_RGMII_ID:
1798 case PHY_INTERFACE_MODE_RGMII_RXID:
1799 case PHY_INTERFACE_MODE_RGMII_TXID:
57878f2f
KH
1800 priv->pinmode_val = 0;
1801 break;
1802 default:
1803 return -EINVAL;
1804 }
1805
1806 return 0;
1807}
1808
1809static int ave_ld11_get_pinmode(struct ave_private *priv,
1810 phy_interface_t phy_mode, u32 arg)
1811{
1812 if (arg > 0)
1813 return -EINVAL;
1814
1815 priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1816
1817 switch (phy_mode) {
1818 case PHY_INTERFACE_MODE_INTERNAL:
1819 priv->pinmode_val = 0;
1820 break;
1821 case PHY_INTERFACE_MODE_RMII:
1822 priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
1823 break;
1824 default:
1825 return -EINVAL;
1826 }
1827
1828 return 0;
1829}
1830
1831static int ave_ld20_get_pinmode(struct ave_private *priv,
1832 phy_interface_t phy_mode, u32 arg)
1833{
1834 if (arg > 0)
1835 return -EINVAL;
1836
1837 priv->pinmode_mask = SG_ETPINMODE_RMII(0);
1838
1839 switch (phy_mode) {
1840 case PHY_INTERFACE_MODE_RMII:
1841 priv->pinmode_val = SG_ETPINMODE_RMII(0);
1842 break;
1843 case PHY_INTERFACE_MODE_RGMII:
b9287f2a
KH
1844 case PHY_INTERFACE_MODE_RGMII_ID:
1845 case PHY_INTERFACE_MODE_RGMII_RXID:
1846 case PHY_INTERFACE_MODE_RGMII_TXID:
57878f2f
KH
1847 priv->pinmode_val = 0;
1848 break;
1849 default:
1850 return -EINVAL;
1851 }
1852
1853 return 0;
1854}
1855
1856static int ave_pxs3_get_pinmode(struct ave_private *priv,
1857 phy_interface_t phy_mode, u32 arg)
1858{
1859 if (arg > 1)
1860 return -EINVAL;
1861
1862 priv->pinmode_mask = SG_ETPINMODE_RMII(arg);
1863
1864 switch (phy_mode) {
1865 case PHY_INTERFACE_MODE_RMII:
1866 priv->pinmode_val = SG_ETPINMODE_RMII(arg);
1867 break;
1868 case PHY_INTERFACE_MODE_RGMII:
b9287f2a
KH
1869 case PHY_INTERFACE_MODE_RGMII_ID:
1870 case PHY_INTERFACE_MODE_RGMII_RXID:
1871 case PHY_INTERFACE_MODE_RGMII_TXID:
57878f2f
KH
1872 priv->pinmode_val = 0;
1873 break;
1874 default:
1875 return -EINVAL;
1876 }
1877
1878 return 0;
1879}
1880
/* Per-SoC configuration: descriptor width, clock/reset consumer names,
 * and the hook translating phy-mode + instance into ETPINMODE settings.
 */
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct ave_soc_data ave_nx1_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{
		.compatible = "socionext,uniphier-nx1-ave4",
		.data = &ave_nx1_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);

static struct platform_driver ave_driver = {
	.probe = ave_probe,
	.remove = ave_remove,
	.driver = {
		.name = "ave",
		.pm = AVE_PM_OPS,
		.of_match_table = of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");