// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

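/* Poll a register until (value & mask) == expected, giving up after
 * RSWITCH_TIMEOUT_US.  Most of the reset and mode-change handshakes
 * below are built on this helper.
 */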
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
        u32 val;

        return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
                                         1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
        iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
        iowrite32(RRC_RR, priv->addr + RRC);
        iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
        iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
        iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
                                           unsigned int port)
{
        u32 val = ioread32(coma_addr + RCEC);

        if (val & RCEC_RCE)
                return (val & BIT(port)) ? true : false;
        else
                return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
                                     int enable)
{
        u32 val;

        if (enable) {
                val = ioread32(coma_addr + RCEC);
                iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
        } else {
                val = ioread32(coma_addr + RCDC);
                iowrite32(val | BIT(port), coma_addr + RCDC);
        }
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
        u32 val;

        val = ioread32(priv->addr + CABPIRM);
        if (val & CABPIRM_BPR)
                return 0;

        iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

        return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
        iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
        unsigned int i;

        for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
                iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
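/* Static forwarding setup: judging by the register writes below, every
 * ETHA port is bound to its own RX queue and allowed to forward only to
 * the CPU (GWCA) agent, while the GWCA agent itself may forward to all
 * ports.
 */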
static void rswitch_fwd_init(struct rswitch_private *priv)
{
        unsigned int i;

        /* For ETHA */
        for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
                iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
                iowrite32(0, priv->addr + FWPBFC(i));
        }

        for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
                iowrite32(priv->rdev[i]->rx_queue->index,
                          priv->addr + FWPBFCSDC(GWCA_INDEX, i));
                iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
        }

        /* For GWCA */
        iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
        iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
        iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
        iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
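/* GWCA mode changes follow a common pattern: make sure the agent clock
 * is running, request the new mode via GWMC, then poll GWMS until the
 * hardware reports that mode.  The agent clock is stopped again when
 * the agent is disabled.
 */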
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
                                    enum rswitch_gwca_mode mode)
{
        int ret;

        if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
                rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

        iowrite32(mode, priv->addr + GWMC);

        ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

        if (mode == GWMC_OPC_DISABLE)
                rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

        return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
        iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

        return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
        iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

        return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
        u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
        unsigned int i;

        for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
                if (dis[i] & mask[i])
                        return true;
        }

        return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
        unsigned int i;

        for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
                dis[i] = ioread32(priv->addr + GWDIS(i));
                dis[i] &= ioread32(priv->addr + GWDIE(i));
        }
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv,
                                    unsigned int index, bool enable)
{
        u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

        iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv,
                                 unsigned int index)
{
        u32 offs = GWDIS(index / 32);

        iowrite32(BIT(index % 32), priv->addr + offs);
}

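/* Ring bookkeeping: "cur" chases "dirty" around a ring of ring_size
 * entries.  With ring_size = 8, cur = 6 and dirty = 3, for example,
 * rswitch_next_queue_index(gq, true, 3) wraps 6 + 3 to index 1 and
 * rswitch_get_num_cur_queues() reports 3 in-flight entries.
 */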
static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
                                             bool cur, unsigned int num)
{
        unsigned int index = cur ? gq->cur : gq->dirty;

        if (index + num >= gq->ring_size)
                index = (index + num) % gq->ring_size;
        else
                index += num;

        return index;
}

static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
        if (gq->cur >= gq->dirty)
                return gq->cur - gq->dirty;
        else
                return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
        struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

        if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
                return true;

        return false;
}

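/* (Re)populate the RX buffer slots [start_index, start_index + num) of
 * the ring, skipping slots that still hold a buffer.  On allocation
 * failure all slots walked so far are freed again and -ENOMEM returned.
 */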
static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
                                           unsigned int start_index,
                                           unsigned int num)
{
        unsigned int i, index;

        for (i = 0; i < num; i++) {
                index = (i + start_index) % gq->ring_size;
                if (gq->rx_bufs[index])
                        continue;
                gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
                if (!gq->rx_bufs[index])
                        goto err;
        }

        return 0;

err:
        for (; i-- > 0; ) {
                index = (i + start_index) % gq->ring_size;
                skb_free_frag(gq->rx_bufs[index]);
                gq->rx_bufs[index] = NULL;
        }

        return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
                                    struct rswitch_gwca_queue *gq)
{
        unsigned int i;

        if (!gq->dir_tx) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(struct rswitch_ext_ts_desc) *
                                  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
                gq->rx_ring = NULL;

                for (i = 0; i < gq->ring_size; i++)
                        skb_free_frag(gq->rx_bufs[i]);
                kfree(gq->rx_bufs);
                gq->rx_bufs = NULL;
        } else {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(struct rswitch_ext_desc) *
                                  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
                gq->tx_ring = NULL;
                kfree(gq->skbs);
                gq->skbs = NULL;
                kfree(gq->unmap_addrs);
                gq->unmap_addrs = NULL;
        }
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
        struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

        dma_free_coherent(&priv->pdev->dev,
                          sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
                          gq->ts_ring, gq->ring_dma);
        gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
                                    struct rswitch_private *priv,
                                    struct rswitch_gwca_queue *gq,
                                    bool dir_tx, unsigned int ring_size)
{
        unsigned int i, bit;

        gq->dir_tx = dir_tx;
        gq->ring_size = ring_size;
        gq->ndev = ndev;

        if (!dir_tx) {
                gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
                if (!gq->rx_bufs)
                        return -ENOMEM;
                if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
                        goto out;

                gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
                                                 sizeof(struct rswitch_ext_ts_desc) *
                                                 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
        } else {
                gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
                if (!gq->skbs)
                        return -ENOMEM;
                gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
                if (!gq->unmap_addrs)
                        goto out;
                gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
                                                 sizeof(struct rswitch_ext_desc) *
                                                 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
        }

        if (!gq->rx_ring && !gq->tx_ring)
                goto out;

        i = gq->index / 32;
        bit = BIT(gq->index % 32);
        if (dir_tx)
                priv->gwca.tx_irq_bits[i] |= bit;
        else
                priv->gwca.rx_irq_bits[i] |= bit;

        return 0;

out:
        rswitch_gwca_queue_free(ndev, gq);

        return -ENOMEM;
}

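/* Descriptors carry 40-bit bus addresses: the low 32 bits live in dptrl
 * (little-endian) and the top 8 bits in dptrh.
 */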
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
        desc->dptrl = cpu_to_le32(lower_32_bits(addr));
        desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
        return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

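/* Lay out a descriptor ring for the hardware: ring_size data descriptors
 * followed by a DT_LINKFIX descriptor pointing back to the start, plus a
 * matching DT_LINKFIX entry in the GWCA link-fix table so the DMAC can
 * locate the ring.
 */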
static int rswitch_gwca_queue_format(struct net_device *ndev,
                                     struct rswitch_private *priv,
                                     struct rswitch_gwca_queue *gq)
{
        unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
        struct rswitch_ext_desc *desc;
        struct rswitch_desc *linkfix;
        dma_addr_t dma_addr;
        unsigned int i;

        memset(gq->tx_ring, 0, ring_size);
        for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
                if (!gq->dir_tx) {
                        dma_addr = dma_map_single(ndev->dev.parent,
                                                  gq->rx_bufs[i] + RSWITCH_HEADROOM,
                                                  RSWITCH_MAP_BUF_SIZE,
                                                  DMA_FROM_DEVICE);
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                goto err;

                        desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
                        rswitch_desc_set_dptr(&desc->desc, dma_addr);
                        desc->desc.die_dt = DT_FEMPTY | DIE;
                } else {
                        desc->desc.die_dt = DT_EEMPTY | DIE;
                }
        }
        rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
        desc->desc.die_dt = DT_LINKFIX;

        linkfix = &priv->gwca.linkfix_table[gq->index];
        linkfix->die_dt = DT_LINKFIX;
        rswitch_desc_set_dptr(linkfix, gq->ring_dma);

        iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
                  priv->addr + GWDCC_OFFS(gq->index));

        return 0;

err:
        if (!gq->dir_tx) {
                for (desc = gq->tx_ring; i-- > 0; desc++) {
                        dma_addr = rswitch_desc_get_dptr(&desc->desc);
                        dma_unmap_single(ndev->dev.parent, dma_addr,
                                         RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
                }
        }

        return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
                                       unsigned int start_index,
                                       unsigned int num)
{
        struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
        struct rswitch_ts_desc *desc;
        unsigned int i, index;

        for (i = 0; i < num; i++) {
                index = (i + start_index) % gq->ring_size;
                desc = &gq->ts_ring[index];
                desc->desc.die_dt = DT_FEMPTY_ND | DIE;
        }
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
                                          struct rswitch_gwca_queue *gq,
                                          unsigned int start_index,
                                          unsigned int num)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_ext_ts_desc *desc;
        unsigned int i, index;
        dma_addr_t dma_addr;

        for (i = 0; i < num; i++) {
                index = (i + start_index) % gq->ring_size;
                desc = &gq->rx_ring[index];
                if (!gq->dir_tx) {
                        dma_addr = dma_map_single(ndev->dev.parent,
                                                  gq->rx_bufs[index] + RSWITCH_HEADROOM,
                                                  RSWITCH_MAP_BUF_SIZE,
                                                  DMA_FROM_DEVICE);
                        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                                goto err;

                        desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
                        rswitch_desc_set_dptr(&desc->desc, dma_addr);
                        dma_wmb();
                        desc->desc.die_dt = DT_FEMPTY | DIE;
                        desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
                } else {
                        desc->desc.die_dt = DT_EEMPTY | DIE;
                }
        }

        return 0;

err:
        if (!gq->dir_tx) {
                for (; i-- > 0; ) {
                        index = (i + start_index) % gq->ring_size;
                        desc = &gq->rx_ring[index];
                        dma_addr = rswitch_desc_get_dptr(&desc->desc);
                        dma_unmap_single(ndev->dev.parent, dma_addr,
                                         RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
                }
        }

        return -ENOMEM;
}

static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
                                            struct rswitch_private *priv,
                                            struct rswitch_gwca_queue *gq)
{
        unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
        struct rswitch_ext_ts_desc *desc;
        struct rswitch_desc *linkfix;
        int err;

        memset(gq->rx_ring, 0, ring_size);
        err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
        if (err < 0)
                return err;

        desc = &gq->rx_ring[gq->ring_size];     /* Last */
        rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
        desc->desc.die_dt = DT_LINKFIX;

        linkfix = &priv->gwca.linkfix_table[gq->index];
        linkfix->die_dt = DT_LINKFIX;
        rswitch_desc_set_dptr(linkfix, gq->ring_dma);

        iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
                  GWDCC_ETS | GWDCC_EDE,
                  priv->addr + GWDCC_OFFS(gq->index));

        return 0;
}

static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
        unsigned int i, num_queues = priv->gwca.num_queues;
        struct rswitch_gwca *gwca = &priv->gwca;
        struct device *dev = &priv->pdev->dev;

        gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
        gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
                                                 &gwca->linkfix_table_dma, GFP_KERNEL);
        if (!gwca->linkfix_table)
                return -ENOMEM;
        for (i = 0; i < num_queues; i++)
                gwca->linkfix_table[i].die_dt = DT_EOS;

        return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
        struct rswitch_gwca *gwca = &priv->gwca;

        if (gwca->linkfix_table)
                dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
                                  gwca->linkfix_table, gwca->linkfix_table_dma);
        gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
        struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
        struct rswitch_ts_desc *desc;

        gq->ring_size = TS_RING_SIZE;
        gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
                                         sizeof(struct rswitch_ts_desc) *
                                         (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

        if (!gq->ts_ring)
                return -ENOMEM;

        rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
        desc = &gq->ts_ring[gq->ring_size];
        desc->desc.die_dt = DT_LINKFIX;
        rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
        INIT_LIST_HEAD(&priv->gwca.ts_info_list);

        return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
        struct rswitch_gwca_queue *gq;
        unsigned int index;

        index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
        if (index >= priv->gwca.num_queues)
                return NULL;
        set_bit(index, priv->gwca.used);
        gq = &priv->gwca.queues[index];
        memset(gq, 0, sizeof(*gq));
        gq->index = index;

        return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
                             struct rswitch_gwca_queue *gq)
{
        clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_private *priv = rdev->priv;
        int err;

        rdev->tx_queue = rswitch_gwca_get(priv);
        if (!rdev->tx_queue)
                return -EBUSY;

        err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
        if (err < 0) {
                rswitch_gwca_put(priv, rdev->tx_queue);
                return err;
        }

        return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);

        rswitch_gwca_queue_free(ndev, rdev->tx_queue);
        rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
{
        struct rswitch_device *rdev = priv->rdev[index];

        return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_private *priv = rdev->priv;
        int err;

        rdev->rx_queue = rswitch_gwca_get(priv);
        if (!rdev->rx_queue)
                return -EBUSY;

        err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
        if (err < 0) {
                rswitch_gwca_put(priv, rdev->rx_queue);
                return err;
        }

        return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);

        rswitch_gwca_queue_free(ndev, rdev->rx_queue);
        rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
{
        struct rswitch_device *rdev = priv->rdev[index];
        struct net_device *ndev = rdev->ndev;

        return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
        unsigned int i;
        int err;

        err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
        if (err < 0)
                return err;
        err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
        if (err < 0)
                return err;

        err = rswitch_gwca_mcast_table_reset(priv);
        if (err < 0)
                return err;
        err = rswitch_gwca_axi_ram_reset(priv);
        if (err < 0)
                return err;

        iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
        iowrite32(0, priv->addr + GWTTFC);
        iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
        iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
        iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
        iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
        iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

        iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

        for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
                err = rswitch_rxdmac_init(priv, i);
                if (err < 0)
                        return err;
                err = rswitch_txdmac_init(priv, i);
                if (err < 0)
                        return err;
        }

        err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
        if (err < 0)
                return err;
        return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
        int err;

        err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
        if (err < 0)
                return err;
        err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
        if (err < 0)
                return err;

        return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
        int err;

        priv->gwca_halt = true;
        err = rswitch_gwca_hw_deinit(priv);
        dev_err(&priv->pdev->dev, "halted (%d)\n", err);

        return err;
}

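/* NAPI receive: walk the descriptors the hardware has completed (no
 * longer DT_FEMPTY), pass the buffers up via build_skb(), then refill
 * and re-arm the consumed part of the ring.  Returns true when the
 * quota was exhausted, i.e. when more work may be pending.
 */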
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_queue *gq = rdev->rx_queue;
        struct rswitch_ext_ts_desc *desc;
        int limit, boguscnt, ret;
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        unsigned int num;
        u16 pkt_len;
        u32 get_ts;

        if (*quota <= 0)
                return true;

        boguscnt = min_t(int, gq->ring_size, *quota);
        limit = boguscnt;

        desc = &gq->rx_ring[gq->cur];
        while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
                dma_rmb();
                pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
                dma_addr = rswitch_desc_get_dptr(&desc->desc);
                dma_unmap_single(ndev->dev.parent, dma_addr,
                                 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
                skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
                if (!skb)
                        goto out;
                skb_reserve(skb, RSWITCH_HEADROOM);
                skb_put(skb, pkt_len);

                get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
                if (get_ts) {
                        struct skb_shared_hwtstamps *shhwtstamps;
                        struct timespec64 ts;

                        shhwtstamps = skb_hwtstamps(skb);
                        memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                        ts.tv_sec = __le32_to_cpu(desc->ts_sec);
                        ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
                        shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
                }
                skb->protocol = eth_type_trans(skb, ndev);
                napi_gro_receive(&rdev->napi, skb);
                rdev->ndev->stats.rx_packets++;
                rdev->ndev->stats.rx_bytes += pkt_len;

out:
                gq->rx_bufs[gq->cur] = NULL;
                gq->cur = rswitch_next_queue_index(gq, true, 1);
                desc = &gq->rx_ring[gq->cur];

                if (--boguscnt <= 0)
                        break;
        }

        num = rswitch_get_num_cur_queues(gq);
        ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
        if (ret < 0)
                goto err;
        ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
        if (ret < 0)
                goto err;
        gq->dirty = rswitch_next_queue_index(gq, false, num);

        *quota -= limit - boguscnt;

        return boguscnt <= 0;

err:
        rswitch_gwca_halt(rdev->priv);

        return 0;
}

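/* Reclaim TX descriptors the hardware has handed back (die_dt flipped
 * back to DT_FEMPTY): update statistics, unmap and free the skbs, and
 * mark the descriptors DT_EEMPTY for reuse.
 */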
static void rswitch_tx_free(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_queue *gq = rdev->tx_queue;
        struct rswitch_ext_desc *desc;
        struct sk_buff *skb;

        for (; rswitch_get_num_cur_queues(gq) > 0;
             gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
                desc = &gq->tx_ring[gq->dirty];
                if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
                        break;

                dma_rmb();
                skb = gq->skbs[gq->dirty];
                if (skb) {
                        /* Update the stats before freeing the skb; skb->len
                         * must not be read after dev_kfree_skb_any().
                         */
                        rdev->ndev->stats.tx_packets++;
                        rdev->ndev->stats.tx_bytes += skb->len;
                        dma_unmap_single(ndev->dev.parent,
                                         gq->unmap_addrs[gq->dirty],
                                         skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(gq->skbs[gq->dirty]);
                        gq->skbs[gq->dirty] = NULL;
                }
                desc->desc.die_dt = DT_EEMPTY;
        }
}

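/* NAPI poll: reclaim completed TX work first, then receive up to the
 * remaining quota.  Data interrupts for this queue pair are re-enabled
 * only once napi_complete_done() confirms polling is really finished.
 */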
static int rswitch_poll(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct rswitch_private *priv;
        struct rswitch_device *rdev;
        unsigned long flags;
        int quota = budget;

        rdev = netdev_priv(ndev);
        priv = rdev->priv;

retry:
        rswitch_tx_free(ndev);

        if (rswitch_rx(ndev, &quota))
                goto out;
        else if (rdev->priv->gwca_halt)
                goto err;
        else if (rswitch_is_queue_rxed(rdev->rx_queue))
                goto retry;

        netif_wake_subqueue(ndev, 0);

        if (napi_complete_done(napi, budget - quota)) {
                spin_lock_irqsave(&priv->lock, flags);
                rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
                rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
                spin_unlock_irqrestore(&priv->lock, flags);
        }

out:
        return budget - quota;

err:
        napi_complete(napi);

        return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);

        if (napi_schedule_prep(&rdev->napi)) {
                spin_lock(&rdev->priv->lock);
                rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
                rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
                spin_unlock(&rdev->priv->lock);
                __napi_schedule(&rdev->napi);
        }
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
        struct rswitch_gwca_queue *gq;
        unsigned int i, index, bit;

        for (i = 0; i < priv->gwca.num_queues; i++) {
                gq = &priv->gwca.queues[i];
                index = gq->index / 32;
                bit = BIT(gq->index % 32);
                if (!(dis[index] & bit))
                        continue;

                rswitch_ack_data_irq(priv, gq->index);
                rswitch_queue_interrupt(gq->ndev);
        }

        return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
        struct rswitch_private *priv = dev_id;
        u32 dis[RSWITCH_NUM_IRQ_REGS];
        irqreturn_t ret = IRQ_NONE;

        rswitch_get_data_irq_status(priv, dis);

        if (rswitch_is_any_data_irq(priv, dis, true) ||
            rswitch_is_any_data_irq(priv, dis, false))
                ret = rswitch_data_irq(priv, dis);

        return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
        char *resource_name, *irq_name;
        int i, ret, irq;

        for (i = 0; i < GWCA_NUM_IRQS; i++) {
                resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
                if (!resource_name)
                        return -ENOMEM;

                irq = platform_get_irq_byname(priv->pdev, resource_name);
                kfree(resource_name);
                if (irq < 0)
                        return irq;

                irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
                                          GWCA_IRQ_NAME, i);
                if (!irq_name)
                        return -ENOMEM;

                ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
                                       0, irq_name, priv);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

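/* TX timestamp completion: each descriptor on the TS ring identifies
 * the originating port and tag; match it against the pending ts_info
 * list, deliver the timestamp via skb_tstamp_tx() and drop the
 * bookkeeping entry.
 */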
static void rswitch_ts(struct rswitch_private *priv)
{
        struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
        struct rswitch_gwca_ts_info *ts_info, *ts_info2;
        struct skb_shared_hwtstamps shhwtstamps;
        struct rswitch_ts_desc *desc;
        struct timespec64 ts;
        unsigned int num;
        u32 tag, port;

        desc = &gq->ts_ring[gq->cur];
        while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
                dma_rmb();

                port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
                tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

                list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
                        if (!(ts_info->port == port && ts_info->tag == tag))
                                continue;

                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        ts.tv_sec = __le32_to_cpu(desc->ts_sec);
                        ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
                        shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
                        skb_tstamp_tx(ts_info->skb, &shhwtstamps);
                        dev_consume_skb_irq(ts_info->skb);
                        list_del(&ts_info->list);
                        kfree(ts_info);
                        break;
                }

                gq->cur = rswitch_next_queue_index(gq, true, 1);
                desc = &gq->ts_ring[gq->cur];
        }

        num = rswitch_get_num_cur_queues(gq);
        rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
        gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
        struct rswitch_private *priv = dev_id;

        if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
                iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
                rswitch_ts(priv);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
        int irq;

        irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
        if (irq < 0)
                return irq;

        return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
                                0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
                                    enum rswitch_etha_mode mode)
{
        int ret;

        if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
                rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

        iowrite32(mode, etha->addr + EAMC);

        ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

        if (mode == EAMC_OPC_DISABLE)
                rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

        return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
        u32 mrmac0 = ioread32(etha->addr + MRMAC0);
        u32 mrmac1 = ioread32(etha->addr + MRMAC1);
        u8 *mac = &etha->mac_addr[0];

        mac[0] = (mrmac0 >>  8) & 0xFF;
        mac[1] = (mrmac0 >>  0) & 0xFF;
        mac[2] = (mrmac1 >> 24) & 0xFF;
        mac[3] = (mrmac1 >> 16) & 0xFF;
        mac[4] = (mrmac1 >>  8) & 0xFF;
        mac[5] = (mrmac1 >>  0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
        iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
        iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
                  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
        iowrite32(MLVC_PLV, etha->addr + MLVC);

        return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
        u32 val;

        rswitch_etha_write_mac_address(etha, mac);

        switch (etha->speed) {
        case 100:
                val = MPIC_LSC_100M;
                break;
        case 1000:
                val = MPIC_LSC_1G;
                break;
        case 2500:
                val = MPIC_LSC_2_5G;
                break;
        default:
                return;
        }

        iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
        rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
                       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
        rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
        int err;

        err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
        if (err < 0)
                return err;
        err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
        if (err < 0)
                return err;

        iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
        rswitch_rmac_setting(etha, mac);
        rswitch_etha_enable_mii(etha);

        err = rswitch_etha_wait_link_verification(etha);
        if (err < 0)
                return err;

        err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
        if (err < 0)
                return err;

        return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

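/* Clause 45 MDIO access through the MPSM register: an address phase is
 * followed by a separate read or write phase, with the completion of
 * each phase acknowledged via the MMIS1 status bits.
 */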
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
                                   int phyad, int devad, int regad, int data)
{
        int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
        u32 val;
        int ret;

        if (devad == 0xffffffff)
                return -ENODEV;

        writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

        val = MPSM_PSME | MPSM_MFF_C45;
        iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

        ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
        if (ret)
                return ret;

        rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

        if (read) {
                writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

                ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
                if (ret)
                        return ret;

                ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

                rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
        } else {
                iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
                          etha->addr + MPSM);

                ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
        }

        return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
                                     int regad)
{
        struct rswitch_etha *etha = bus->priv;

        return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
                                      int regad, u16 val)
{
        struct rswitch_etha *etha = bus->priv;

        return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
        struct device_node *ports, *port;
        int err = 0;
        u32 index;

        ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
                                     "ethernet-ports");
        if (!ports)
                return NULL;

        for_each_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &index);
                if (err < 0) {
                        port = NULL;
                        goto out;
                }
                if (index == rdev->etha->index) {
                        if (!of_device_is_available(port))
                                port = NULL;
                        break;
                }
        }

out:
        of_node_put(ports);

        return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
        u32 max_speed;
        int err;

        if (!rdev->np_port)
                return 0;       /* ignored */

        err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
        if (err)
                return err;

        err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
        if (!err) {
                rdev->etha->speed = max_speed;
                return 0;
        }

        /* if no "max-speed" property, let's use default speed */
        switch (rdev->etha->phy_interface) {
        case PHY_INTERFACE_MODE_MII:
                rdev->etha->speed = SPEED_100;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                rdev->etha->speed = SPEED_1000;
                break;
        case PHY_INTERFACE_MODE_USXGMII:
                rdev->etha->speed = SPEED_2500;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int rswitch_mii_register(struct rswitch_device *rdev)
{
        struct device_node *mdio_np;
        struct mii_bus *mii_bus;
        int err;

        mii_bus = mdiobus_alloc();
        if (!mii_bus)
                return -ENOMEM;

        mii_bus->name = "rswitch_mii";
        sprintf(mii_bus->id, "etha%d", rdev->etha->index);
        mii_bus->priv = rdev->etha;
        mii_bus->read_c45 = rswitch_etha_mii_read_c45;
        mii_bus->write_c45 = rswitch_etha_mii_write_c45;
        mii_bus->parent = &rdev->priv->pdev->dev;

        mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
        err = of_mdiobus_register(mii_bus, mdio_np);
        if (err < 0) {
                mdiobus_free(mii_bus);
                goto out;
        }

        rdev->etha->mii = mii_bus;

out:
        of_node_put(mdio_np);

        return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
        if (rdev->etha->mii) {
                mdiobus_unregister(rdev->etha->mii);
                mdiobus_free(rdev->etha->mii);
                rdev->etha->mii = NULL;
        }
}

static void rswitch_adjust_link(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;

        if (phydev->link != rdev->etha->link) {
                phy_print_status(phydev);
                if (phydev->link)
                        phy_power_on(rdev->serdes);
                else if (rdev->serdes->power_count)
                        phy_power_off(rdev->serdes);

                rdev->etha->link = phydev->link;

                if (!rdev->priv->etha_no_runtime_change &&
                    phydev->speed != rdev->etha->speed) {
                        rdev->etha->speed = phydev->speed;

                        rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
                        phy_set_speed(rdev->serdes, rdev->etha->speed);
                }
        }
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
                                         struct phy_device *phydev)
{
        if (!rdev->priv->etha_no_runtime_change)
                return;

        switch (rdev->etha->speed) {
        case SPEED_2500:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
                break;
        case SPEED_1000:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
                break;
        case SPEED_100:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
                break;
        default:
                break;
        }

        phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
        struct phy_device *phydev;
        struct device_node *phy;
        int err = -ENOENT;

        if (!rdev->np_port)
                return -ENODEV;

        phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
        if (!phy)
                return -ENODEV;

        /* Set phydev->host_interfaces before calling of_phy_connect() to
         * configure the PHY with the information of host_interfaces.
         */
        phydev = of_phy_find_device(phy);
        if (!phydev)
                goto out;
        __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
        phydev->mac_managed_pm = true;

        phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
                                rdev->etha->phy_interface);
        if (!phydev)
                goto out;

        phy_set_max_speed(phydev, SPEED_2500);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        rswitch_phy_remove_link_mode(rdev, phydev);

        phy_attached_info(phydev);

        err = 0;
out:
        of_node_put(phy);

        return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
        if (rdev->ndev->phydev)
                phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
        int err;

        err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
                               rdev->etha->phy_interface);
        if (err < 0)
                return err;

        return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
        int err;

        if (!rdev->etha->operated) {
                err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
                if (err < 0)
                        return err;
                if (rdev->priv->etha_no_runtime_change)
                        rdev->etha->operated = true;
        }

        err = rswitch_mii_register(rdev);
        if (err < 0)
                return err;

        err = rswitch_phy_device_init(rdev);
        if (err < 0)
                goto err_phy_device_init;

        rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
        if (IS_ERR(rdev->serdes)) {
                err = PTR_ERR(rdev->serdes);
                goto err_serdes_phy_get;
        }

        err = rswitch_serdes_set_params(rdev);
        if (err < 0)
                goto err_serdes_set_params;

        return 0;

err_serdes_set_params:
err_serdes_phy_get:
        rswitch_phy_device_deinit(rdev);

err_phy_device_init:
        rswitch_mii_unregister(rdev);

        return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
        rswitch_phy_device_deinit(rdev);
        rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
        unsigned int i;
        int err;

        rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_ether_port_init_one(priv->rdev[i]);
                if (err)
                        goto err_init_one;
        }

        rswitch_for_each_enabled_port(priv, i) {
                err = phy_init(priv->rdev[i]->serdes);
                if (err)
                        goto err_serdes;
        }

        return 0;

err_serdes:
        rswitch_for_each_enabled_port_continue_reverse(priv, i)
                phy_exit(priv->rdev[i]->serdes);
        i = RSWITCH_NUM_PORTS;

err_init_one:
        rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_ether_port_deinit_one(priv->rdev[i]);

        return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
        unsigned int i;

        for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
                phy_exit(priv->rdev[i]->serdes);
                rswitch_ether_port_deinit_one(priv->rdev[i]);
        }
}

static int rswitch_open(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        unsigned long flags;

        phy_start(ndev->phydev);

        napi_enable(&rdev->napi);
        netif_start_queue(ndev);

        spin_lock_irqsave(&rdev->priv->lock, flags);
        rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
        rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
        spin_unlock_irqrestore(&rdev->priv->lock, flags);

        if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
                iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

        bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

        return 0;
}

static int rswitch_stop(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_ts_info *ts_info, *ts_info2;
        unsigned long flags;

        netif_tx_stop_all_queues(ndev);
        bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

        if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
                iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

        list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
                if (ts_info->port != rdev->port)
                        continue;
                dev_kfree_skb_irq(ts_info->skb);
                list_del(&ts_info->list);
                kfree(ts_info);
        }

        spin_lock_irqsave(&rdev->priv->lock, flags);
        rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
        rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
        spin_unlock_irqrestore(&rdev->priv->lock, flags);

        phy_stop(ndev->phydev);
        napi_disable(&rdev->napi);

        return 0;
}

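/* Fill INFO1 of a TX descriptor.  When a hardware TX timestamp was
 * requested, allocate a tag and remember it on the ts_info list so
 * rswitch_ts() can match the timestamp back to this skb later;
 * skb_get() keeps the skb alive until that completion arrives.
 */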
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
                                       struct sk_buff *skb,
                                       struct rswitch_ext_desc *desc)
{
        desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
                                  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                struct rswitch_gwca_ts_info *ts_info;

                ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
                if (!ts_info)
                        return false;

                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                rdev->ts_tag++;
                desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

                ts_info->skb = skb_get(skb);
                ts_info->port = rdev->port;
                ts_info->tag = rdev->ts_tag;
                list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

                skb_tx_timestamp(skb);
        }

        return true;
}

static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
                                 struct sk_buff *skb,
                                 struct rswitch_ext_desc *desc,
                                 dma_addr_t dma_addr, u16 len, u8 die_dt)
{
        rswitch_desc_set_dptr(&desc->desc, dma_addr);
        desc->desc.info_ds = cpu_to_le16(len);
        if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
                return false;

        /* All descriptor fields must be visible before die_dt hands the
         * descriptor over to the hardware.
         */
        dma_wmb();

        desc->desc.die_dt = die_dt;

        return true;
}

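/* Queue one skb as a single FSINGLE descriptor.  The ring is treated as
 * full one slot early (ring_size - 1) so the producer index can never
 * catch up with the dirty index.  Note the two ordering points: dma_wmb()
 * in rswitch_ext_desc_set() before die_dt passes ownership to the
 * hardware, and wmb() before gq->cur is advanced and the GWCA transfer
 * request register (GWTRC) is kicked.
 */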
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_queue *gq = rdev->tx_queue;
        netdev_tx_t ret = NETDEV_TX_OK;
        struct rswitch_ext_desc *desc;
        dma_addr_t dma_addr;

        if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
                netif_stop_subqueue(ndev, 0);
                return NETDEV_TX_BUSY;
        }

        if (skb_put_padto(skb, ETH_ZLEN))
                return ret;

        dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(ndev->dev.parent, dma_addr))
                goto err_kfree;

        gq->skbs[gq->cur] = skb;
        gq->unmap_addrs[gq->cur] = dma_addr;
        desc = &gq->tx_ring[gq->cur];
        if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, skb->len, DT_FSINGLE | DIE))
                goto err_unmap;

        wmb();  /* gq->cur must be incremented after die_dt was set */

        gq->cur = rswitch_next_queue_index(gq, true, 1);
        rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

        return ret;

err_unmap:
        dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);

err_kfree:
        dev_kfree_skb_any(skb);

        /* The skb was consumed (freed), so report NETDEV_TX_OK anyway */
        return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
        return &ndev->stats;
}

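/* SIOCGHWTSTAMP: report the currently programmed hardware timestamping
 * state in the same hwtstamp_config layout that SIOCSHWTSTAMP accepts.
 */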
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rcar_gen4_ptp_private *ptp_priv;
        struct hwtstamp_config config;

        ptp_priv = rdev->priv->ptp_priv;

        config.flags = 0;
        config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
                                                    HWTSTAMP_TX_OFF;
        switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
        case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                break;
        case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                config.rx_filter = HWTSTAMP_FILTER_NONE;
                break;
        }

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

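/* SIOCSHWTSTAMP: configure hardware timestamping.  Per the hwtstamp ABI,
 * an rx_filter the hardware cannot match exactly is upgraded to
 * HWTSTAMP_FILTER_ALL and the coerced config is copied back so userspace
 * can see what it actually got.  An illustrative userspace caller (a
 * sketch only; "tsn0" and fd are placeholders):
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_ALL,
 *      };
 *      struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *      strncpy(ifr.ifr_name, "tsn0", IFNAMSIZ - 1);
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);   // fd: any AF_INET socket
 */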
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
        struct hwtstamp_config config;
        u32 tstamp_tx_ctrl;

        if (copy_from_user(&config, req->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                tstamp_tx_ctrl = 0;
                break;
        case HWTSTAMP_TX_ON:
                tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tstamp_rx_ctrl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
                break;
        default:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
                break;
        }

        rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
        rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

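/* Handle the timestamping ioctls here; delegate everything else (MII
 * register access and the like) to the attached PHY.
 */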
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
        if (!netif_running(ndev))
                return -EINVAL;

        switch (cmd) {
        case SIOCGHWTSTAMP:
                return rswitch_hwstamp_get(ndev, req);
        case SIOCSHWTSTAMP:
                return rswitch_hwstamp_set(ndev, req);
        default:
                return phy_mii_ioctl(ndev->phydev, req, cmd);
        }
}

static const struct net_device_ops rswitch_netdev_ops = {
        .ndo_open = rswitch_open,
        .ndo_stop = rswitch_stop,
        .ndo_start_xmit = rswitch_start_xmit,
        .ndo_get_stats = rswitch_get_stats,
        .ndo_eth_ioctl = rswitch_eth_ioctl,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
};

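/* Advertise both software and hardware timestamping; the PHC index refers
 * to the shared gPTP clock registered from rswitch_init().
 */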
static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
        struct rswitch_device *rdev = netdev_priv(ndev);

        info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

        return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
        .get_ts_info = rswitch_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
        { .compatible = "renesas,r8a779f0-ether-switch", },
        { }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
        struct rswitch_etha *etha = &priv->etha[index];

        memset(etha, 0, sizeof(*etha));
        etha->index = index;
        etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
        etha->coma_addr = priv->addr;

        /* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
         * The PSMCS value is calculated for an MDC frequency of 2.5 MHz, so
         * both the numerator and the denominator are multiplied by 10 to
         * keep the arithmetic in integers (2.5 becomes 25).
         */
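        /* For example, assuming a 320 MHz peripheral clock (an illustrative
         * value only): 320000000 / 100000 = 3200, and 3200 / (25 * 2) - 1
         * = 63, i.e. PSMCS = 63 yields a 2.5 MHz MDC.
         */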
        etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}

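/* Allocate and initialise one netdev ("tsnN") per switch port.  The MAC
 * address comes from the port's DT node when available, falling back to
 * the address read back from the ETHA registers, and finally to a random
 * address.
 */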
static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
        struct platform_device *pdev = priv->pdev;
        struct rswitch_device *rdev;
        struct net_device *ndev;
        int err;

        if (index >= RSWITCH_NUM_PORTS)
                return -EINVAL;

        ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
        if (!ndev)
                return -ENOMEM;

        SET_NETDEV_DEV(ndev, &pdev->dev);
        ether_setup(ndev);

        rdev = netdev_priv(ndev);
        rdev->ndev = ndev;
        rdev->priv = priv;
        priv->rdev[index] = rdev;
        rdev->port = index;
        rdev->etha = &priv->etha[index];
        rdev->addr = priv->addr;

        ndev->base_addr = (unsigned long)rdev->addr;
        snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
        ndev->netdev_ops = &rswitch_netdev_ops;
        ndev->ethtool_ops = &rswitch_ethtool_ops;

        netif_napi_add(ndev, &rdev->napi, rswitch_poll);

        rdev->np_port = rswitch_get_port_node(rdev);
        rdev->disabled = !rdev->np_port;
        err = of_get_ethdev_address(rdev->np_port, ndev);
        of_node_put(rdev->np_port);
        if (err) {
                if (is_valid_ether_addr(rdev->etha->mac_addr))
                        eth_hw_addr_set(ndev, rdev->etha->mac_addr);
                else
                        eth_hw_addr_random(ndev);
        }

        err = rswitch_etha_get_params(rdev);
        if (err < 0)
                goto out_get_params;

        if (rdev->priv->gwca.speed < rdev->etha->speed)
                rdev->priv->gwca.speed = rdev->etha->speed;

        err = rswitch_rxdmac_alloc(ndev);
        if (err < 0)
                goto out_rxdmac;

        err = rswitch_txdmac_alloc(ndev);
        if (err < 0)
                goto out_txdmac;

        return 0;

out_txdmac:
        rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
        netif_napi_del(&rdev->napi);
        free_netdev(ndev);

        return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
        struct rswitch_device *rdev = priv->rdev[index];
        struct net_device *ndev = rdev->ndev;

        rswitch_txdmac_free(ndev);
        rswitch_rxdmac_free(ndev);
        netif_napi_del(&rdev->napi);
        free_netdev(ndev);
}

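/* Bring-up order matters here: the agent clocks are enabled first so the
 * MAC addresses can be read out and cached before the switch is reset,
 * then the clocks are re-enabled and the hardware blocks (TOP, buffer
 * pool, COMA, forwarding engine, GWCA, gPTP) are configured before any
 * netdev is registered.
 */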
static int rswitch_init(struct rswitch_private *priv)
{
        unsigned int i;
        int err;

        for (i = 0; i < RSWITCH_NUM_PORTS; i++)
                rswitch_etha_init(priv, i);

        rswitch_clock_enable(priv);
        for (i = 0; i < RSWITCH_NUM_PORTS; i++)
                rswitch_etha_read_mac_address(&priv->etha[i]);

        rswitch_reset(priv);

        rswitch_clock_enable(priv);
        rswitch_top_init(priv);
        err = rswitch_bpool_config(priv);
        if (err < 0)
                return err;

        rswitch_coma_init(priv);

        err = rswitch_gwca_linkfix_alloc(priv);
        if (err < 0)
                return err;

        err = rswitch_gwca_ts_queue_alloc(priv);
        if (err < 0)
                goto err_ts_queue_alloc;

        for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
                err = rswitch_device_alloc(priv, i);
                if (err < 0) {
                        for (; i-- > 0; )
                                rswitch_device_free(priv, i);
                        goto err_device_alloc;
                }
        }

        rswitch_fwd_init(priv);

        err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
                                     clk_get_rate(priv->clk));
        if (err < 0)
                goto err_ptp_register;

        err = rswitch_gwca_request_irqs(priv);
        if (err < 0)
                goto err_gwca_request_irq;

        err = rswitch_gwca_ts_request_irqs(priv);
        if (err < 0)
                goto err_gwca_ts_request_irq;

        err = rswitch_gwca_hw_init(priv);
        if (err < 0)
                goto err_gwca_hw_init;

        err = rswitch_ether_port_init_all(priv);
        if (err)
                goto err_ether_port_init_all;

        rswitch_for_each_enabled_port(priv, i) {
                err = register_netdev(priv->rdev[i]->ndev);
                if (err) {
                        rswitch_for_each_enabled_port_continue_reverse(priv, i)
                                unregister_netdev(priv->rdev[i]->ndev);
                        goto err_register_netdev;
                }
        }

        rswitch_for_each_enabled_port(priv, i)
                netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
                            priv->rdev[i]->ndev->dev_addr);

        return 0;

err_register_netdev:
        rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
        rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
        rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
        for (i = 0; i < RSWITCH_NUM_PORTS; i++)
                rswitch_device_free(priv, i);

err_device_alloc:
        rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
        rswitch_gwca_linkfix_free(priv);

        return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
        { .soc_id = "r8a779f0", .revision = "ES1.0" },
        { /* Sentinel */ }
};

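/* The "secure_base" MMIO window covers the COMA, ETHA, GWCA and gPTP
 * register blocks; every other base address in the driver is an offset
 * into this single mapping.  DMA is attempted with a 40-bit mask first,
 * falling back to 32 bits.
 */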
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
        const struct soc_device_attribute *attr;
        struct rswitch_private *priv;
        struct resource *res;
        int ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
        if (!res) {
                dev_err(&pdev->dev, "invalid resource\n");
                return -EINVAL;
        }

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        spin_lock_init(&priv->lock);

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        attr = soc_device_match(rswitch_soc_no_speed_change);
        if (attr)
                priv->etha_no_runtime_change = true;

        priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
        if (!priv->ptp_priv)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        priv->pdev = pdev;
        priv->addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->addr))
                return PTR_ERR(priv->addr);

        priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
        if (ret < 0) {
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret < 0)
                        return ret;
        }

        priv->gwca.index = AGENT_INDEX_GWCA;
        priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
                                    RSWITCH_MAX_NUM_QUEUES);
        priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
                                         sizeof(*priv->gwca.queues), GFP_KERNEL);
        if (!priv->gwca.queues)
                return -ENOMEM;

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        ret = rswitch_init(priv);
        if (ret < 0) {
                pm_runtime_put(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                return ret;
        }

        device_set_wakeup_capable(&pdev->dev, 1);

        return ret;
}

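/* Tear everything down in roughly the reverse order of rswitch_init();
 * called from .remove_new when the device is unbound.
 */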
static void rswitch_deinit(struct rswitch_private *priv)
{
        unsigned int i;

        rswitch_gwca_hw_deinit(priv);
        rcar_gen4_ptp_unregister(priv->ptp_priv);

        rswitch_for_each_enabled_port(priv, i) {
                struct rswitch_device *rdev = priv->rdev[i];

                unregister_netdev(rdev->ndev);
                rswitch_ether_port_deinit_one(rdev);
                phy_exit(priv->rdev[i]->serdes);
        }

        for (i = 0; i < RSWITCH_NUM_PORTS; i++)
                rswitch_device_free(priv, i);

        rswitch_gwca_ts_queue_free(priv);
        rswitch_gwca_linkfix_free(priv);

        rswitch_clock_disable(priv);
}

static void renesas_eth_sw_remove(struct platform_device *pdev)
{
        struct rswitch_private *priv = platform_get_drvdata(pdev);

        rswitch_deinit(priv);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        platform_set_drvdata(pdev, NULL);
}

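/* System sleep: detach and stop any running port, then power down its
 * SerDes PHY.  The init_count check skips phy_exit() for a SerDes that
 * is not currently initialised.
 */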
static int renesas_eth_sw_suspend(struct device *dev)
{
        struct rswitch_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev;
        unsigned int i;

        rswitch_for_each_enabled_port(priv, i) {
                ndev = priv->rdev[i]->ndev;
                if (netif_running(ndev)) {
                        netif_device_detach(ndev);
                        rswitch_stop(ndev);
                }
                if (priv->rdev[i]->serdes->init_count)
                        phy_exit(priv->rdev[i]->serdes);
        }

        return 0;
}

static int renesas_eth_sw_resume(struct device *dev)
{
        struct rswitch_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev;
        unsigned int i;

        rswitch_for_each_enabled_port(priv, i) {
                phy_init(priv->rdev[i]->serdes);
                ndev = priv->rdev[i]->ndev;
                if (netif_running(ndev)) {
                        rswitch_open(ndev);
                        netif_device_attach(ndev);
                }
        }

        return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
                                renesas_eth_sw_resume);

static struct platform_driver renesas_eth_sw_driver_platform = {
        .probe = renesas_eth_sw_probe,
        .remove_new = renesas_eth_sw_remove,
        .driver = {
                .name = "renesas_eth_sw",
                .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
                .of_match_table = renesas_eth_sw_of_table,
        }
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");