// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"

static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}

static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}

/* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}

static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}

static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}

static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
{
	u32 val = ioread32(coma_addr + RCEC);

	if (val & RCEC_RCE)
		return (val & BIT(port)) ? true : false;
	else
		return false;
}

static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}

static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}

static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}

/* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}

/* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}

/* Gateway CPU agent block (GWCA) */
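/* The agent clock is turned on before a mode change if it is not already
 * running; the requested operating mode is then polled back from GWMS.
 */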
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}

static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}

static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}

static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
{
	u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		if (dis[i] & mask[i])
			return true;
	}

	return false;
}

static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}

static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}

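/* Ring bookkeeping helpers: "cur" chases the hardware's producer position
 * and "dirty" the oldest not-yet-reclaimed entry; both wrap modulo
 * ring_size, so the occupancy math below handles the wrapped case too.
 */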
static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
{
	int index = cur ? gq->cur : gq->dirty;

	if (index + num >= gq->ring_size)
		index = (index + num) % gq->ring_size;
	else
		index += num;

	return index;
}

static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
{
	if (gq->cur >= gq->dirty)
		return gq->cur - gq->dirty;
	else
		return gq->ring_size - gq->dirty + gq->cur;
}

static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];

	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
		return true;

	return false;
}

static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}

static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}

static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}

static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, int ring_size)
{
	int i, bit;

	gq->dir_tx = dir_tx;
	gq->ring_size = ring_size;
	gq->ndev = ndev;

	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
	if (!gq->skbs)
		return -ENOMEM;

	if (!dir_tx) {
		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);

		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_ts_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	} else {
		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
						 sizeof(struct rswitch_ext_desc) *
						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
	}

	if (!gq->rx_ring && !gq->tx_ring)
		goto out;

	i = gq->index / 32;
	bit = BIT(gq->index % 32);
	if (dir_tx)
		priv->gwca.tx_irq_bits[i] |= bit;
	else
		priv->gwca.rx_irq_bits[i] |= bit;

	return 0;

out:
	rswitch_gwca_queue_free(ndev, gq);

	return -ENOMEM;
}

static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}

static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}

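/* Each queue uses ring_size + 1 descriptors: the extra one is a DT_LINKFIX
 * descriptor pointing back at the ring base, so the hardware follows a
 * closed loop. The per-queue entry in the GWCA link-fix table points at
 * the same base address.
 */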
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       int start_index, int num)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->ts_ring[index];
		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
	}
}

static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}

static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}

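/* The link-fix table is a single DMA-coherent array with one rswitch_desc
 * slot per queue; its base address is programmed into GWDCBAC0/1 during
 * rswitch_gwca_hw_init() so the hardware can locate every descriptor ring.
 */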
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}

static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}

static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}

static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}

static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}

static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}

static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}

static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}

static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}

static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}

static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}

static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}

static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}

static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}

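/* NAPI RX: consume completed rx_ring descriptors from gq->cur until the
 * first DT_FEMPTY entry or until the quota runs out, optionally attaching
 * the hardware timestamp, then refill and re-arm the reclaimed slots.
 */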
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}

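/* Reclaim transmitted tx_ring entries: unmap and free each completed skb,
 * return the descriptor to DT_EEMPTY and account the packet. With
 * free_txed_only clear, even not-yet-transmitted entries are torn down.
 */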
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}

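/* NAPI poll: free TX work first, then run RX; re-check the RX ring before
 * completing so a descriptor that landed after the last pass is not left
 * behind, and only re-enable the data IRQs once NAPI is really done.
 */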
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}

static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}

static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}

static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}

static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}

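/* TX timestamp delivery: each completed ts_ring descriptor carries the port
 * and tag of a transmitted frame; match it against the pending ts_info list
 * to hand the hardware timestamp back via skb_tstamp_tx().
 */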
static void rswitch_ts(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct rswitch_ts_desc *desc;
	struct timespec64 ts;
	u32 tag, port;
	int num;

	desc = &gq->ts_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
		dma_rmb();

		port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
		tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
			if (!(ts_info->port == port && ts_info->tag == tag))
				continue;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
			dev_consume_skb_irq(ts_info->skb);
			list_del(&ts_info->list);
			kfree(ts_info);
			break;
		}

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->ts_ring[gq->cur];
	}

	num = rswitch_get_num_cur_queues(gq);
	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
	gq->dirty = rswitch_next_queue_index(gq, false, num);
}

static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;

	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
		rswitch_ts(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
{
	int irq;

	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
	if (irq < 0)
		return irq;

	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
				0, GWCA_TS_IRQ_NAME, priv);
}

/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

	iowrite32(mode, etha->addr + EAMC);

	ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

	if (mode == EAMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

	return ret;
}

static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
{
	u32 mrmac0 = ioread32(etha->addr + MRMAC0);
	u32 mrmac1 = ioread32(etha->addr + MRMAC1);
	u8 *mac = &etha->mac_addr[0];

	mac[0] = (mrmac0 >> 8) & 0xFF;
	mac[1] = (mrmac0 >> 0) & 0xFF;
	mac[2] = (mrmac1 >> 24) & 0xFF;
	mac[3] = (mrmac1 >> 16) & 0xFF;
	mac[4] = (mrmac1 >> 8) & 0xFF;
	mac[5] = (mrmac1 >> 0) & 0xFF;
}

static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
{
	iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
	iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
		  etha->addr + MRMAC1);
}

static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
	iowrite32(MLVC_PLV, etha->addr + MLVC);

	return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}

static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
{
	u32 val;

	rswitch_etha_write_mac_address(etha, mac);

	switch (etha->speed) {
	case 100:
		val = MPIC_LSC_100M;
		break;
	case 1000:
		val = MPIC_LSC_1G;
		break;
	case 2500:
		val = MPIC_LSC_2_5G;
		break;
	default:
		return;
	}

	iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
}

static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
	rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
		       MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
	rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}

static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
	int err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
	if (err < 0)
		return err;

	iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
	rswitch_rmac_setting(etha, mac);
	rswitch_etha_enable_mii(etha);

	err = rswitch_etha_wait_link_verification(etha);
	if (err < 0)
		return err;

	err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
	if (err < 0)
		return err;

	return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}

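/* Clause 45 MDIO access via the MPSM register: the register address is set
 * first, then the read or write operation is issued, polling MMIS1 for
 * completion of each phase and acknowledging the status bits by writing
 * them back.
 */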
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
				   int phyad, int devad, int regad, int data)
{
	int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
	u32 val;
	int ret;

	if (devad == 0xffffffff)
		return -ENODEV;

	writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

	val = MPSM_PSME | MPSM_MFF_C45;
	iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

	ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
	if (ret)
		return ret;

	rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

	if (read) {
		writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
		if (ret)
			return ret;

		ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

		rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
	} else {
		iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
			  etha->addr + MPSM);

		ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
	}

	return ret;
}

static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}

static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
{
	struct rswitch_etha *etha = bus->priv;

	return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}

/* Call of_node_put(port) after done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
	struct device_node *ports, *port;
	int err = 0;
	u32 index;

	ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
				     "ethernet-ports");
	if (!ports)
		return NULL;

	for_each_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &index);
		if (err < 0) {
			port = NULL;
			goto out;
		}
		if (index == rdev->etha->index) {
			if (!of_device_is_available(port))
				port = NULL;
			break;
		}
	}

out:
	of_node_put(ports);

	return port;
}

static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
	u32 max_speed;
	int err;

	if (!rdev->np_port)
		return 0;	/* ignored */

	err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
	if (err)
		return err;

	err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
	if (!err) {
		rdev->etha->speed = max_speed;
		return 0;
	}

	/* if no "max-speed" property, let's use default speed */
	switch (rdev->etha->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		rdev->etha->speed = SPEED_100;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		rdev->etha->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_USXGMII:
		rdev->etha->speed = SPEED_2500;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int rswitch_mii_register(struct rswitch_device *rdev)
{
	struct device_node *mdio_np;
	struct mii_bus *mii_bus;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "rswitch_mii";
	sprintf(mii_bus->id, "etha%d", rdev->etha->index);
	mii_bus->priv = rdev->etha;
	mii_bus->read_c45 = rswitch_etha_mii_read_c45;
	mii_bus->write_c45 = rswitch_etha_mii_write_c45;
	mii_bus->parent = &rdev->priv->pdev->dev;

	mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
	err = of_mdiobus_register(mii_bus, mdio_np);
	if (err < 0) {
		mdiobus_free(mii_bus);
		goto out;
	}

	rdev->etha->mii = mii_bus;

out:
	of_node_put(mdio_np);

	return err;
}

static void rswitch_mii_unregister(struct rswitch_device *rdev)
{
	if (rdev->etha->mii) {
		mdiobus_unregister(rdev->etha->mii);
		mdiobus_free(rdev->etha->mii);
		rdev->etha->mii = NULL;
	}
}

static void rswitch_adjust_link(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != rdev->etha->link) {
		phy_print_status(phydev);
		if (phydev->link)
			phy_power_on(rdev->serdes);
		else
			phy_power_off(rdev->serdes);

		rdev->etha->link = phydev->link;

		if (!rdev->priv->etha_no_runtime_change &&
		    phydev->speed != rdev->etha->speed) {
			rdev->etha->speed = phydev->speed;

			rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
			phy_set_speed(rdev->serdes, rdev->etha->speed);
		}
	}
}

static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
{
	if (!rdev->priv->etha_no_runtime_change)
		return;

	switch (rdev->etha->speed) {
	case SPEED_2500:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_1000:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
		break;
	case SPEED_100:
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
		break;
	default:
		break;
	}

	phy_set_max_speed(phydev, rdev->etha->speed);
}

static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
	struct phy_device *phydev;
	struct device_node *phy;
	int err = -ENOENT;

	if (!rdev->np_port)
		return -ENODEV;

	phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
	if (!phy)
		return -ENODEV;

	/* Set phydev->host_interfaces before calling of_phy_connect() to
	 * configure the PHY with the information of host_interfaces.
	 */
	phydev = of_phy_find_device(phy);
	if (!phydev)
		goto out;
	__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);

	phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
				rdev->etha->phy_interface);
	if (!phydev)
		goto out;

	phy_set_max_speed(phydev, SPEED_2500);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	rswitch_phy_remove_link_mode(rdev, phydev);

	phy_attached_info(phydev);

	err = 0;
out:
	of_node_put(phy);

	return err;
}

static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
	if (rdev->ndev->phydev)
		phy_disconnect(rdev->ndev->phydev);
}

static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
	int err;

	err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
			       rdev->etha->phy_interface);
	if (err < 0)
		return err;

	return phy_set_speed(rdev->serdes, rdev->etha->speed);
}

static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
	int err;

	if (!rdev->etha->operated) {
		err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
		if (err < 0)
			return err;
		if (rdev->priv->etha_no_runtime_change)
			rdev->etha->operated = true;
	}

	err = rswitch_mii_register(rdev);
	if (err < 0)
		return err;

	err = rswitch_phy_device_init(rdev);
	if (err < 0)
		goto err_phy_device_init;

	rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
	if (IS_ERR(rdev->serdes)) {
		err = PTR_ERR(rdev->serdes);
		goto err_serdes_phy_get;
	}

	err = rswitch_serdes_set_params(rdev);
	if (err < 0)
		goto err_serdes_set_params;

	return 0;

err_serdes_set_params:
err_serdes_phy_get:
	rswitch_phy_device_deinit(rdev);

err_phy_device_init:
	rswitch_mii_unregister(rdev);

	return err;
}

static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
	rswitch_phy_device_deinit(rdev);
	rswitch_mii_unregister(rdev);
}

static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
	int i, err;

	rswitch_for_each_enabled_port(priv, i) {
		err = rswitch_ether_port_init_one(priv->rdev[i]);
		if (err)
			goto err_init_one;
	}

	rswitch_for_each_enabled_port(priv, i) {
		err = phy_init(priv->rdev[i]->serdes);
		if (err)
			goto err_serdes;
	}

	return 0;

err_serdes:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		phy_exit(priv->rdev[i]->serdes);
	i = RSWITCH_NUM_PORTS;

err_init_one:
	rswitch_for_each_enabled_port_continue_reverse(priv, i)
		rswitch_ether_port_deinit_one(priv->rdev[i]);

	return err;
}

static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(priv->rdev[i]);
	}
}

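/* The GWCA timestamp interrupt is shared by all ports, so it is enabled
 * when the first port opens and disabled again once the last port closes,
 * tracked via the opened_ports bitmap.
 */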
static int rswitch_open(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	unsigned long flags;

	phy_start(ndev->phydev);

	napi_enable(&rdev->napi);
	netif_start_queue(ndev);

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);

	bitmap_set(rdev->priv->opened_ports, rdev->port, 1);

	return 0;
};

static int rswitch_stop(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
	unsigned long flags;

	netif_tx_stop_all_queues(ndev);
	bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);

	if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
		iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);

	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
		if (ts_info->port != rdev->port)
			continue;
		dev_kfree_skb_irq(ts_info->skb);
		list_del(&ts_info->list);
		kfree(ts_info);
	}

	spin_lock_irqsave(&rdev->priv->lock, flags);
	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
	spin_unlock_irqrestore(&rdev->priv->lock, flags);

	phy_stop(ndev->phydev);
	napi_disable(&rdev->napi);

	return 0;
};

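/* TX path: one DT_FSINGLE descriptor per skb. If a hardware timestamp was
 * requested, a ts_info entry keyed by port and tag is queued so that
 * rswitch_ts() can match the completion later; the write barrier orders
 * the die_dt update against the doorbell write to GWTRC.
 */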
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	int ret = NETDEV_TX_OK;
	dma_addr_t dma_addr;

	if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
		netif_stop_subqueue(ndev, 0);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		return ret;

	dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	gq->skbs[gq->cur] = skb;
	desc = &gq->tx_ring[gq->cur];
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	desc->desc.info_ds = cpu_to_le16(skb->len);

	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
				  INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct rswitch_gwca_ts_info *ts_info;

		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
		if (!ts_info) {
			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		rdev->ts_tag++;
		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);

		ts_info->skb = skb_get(skb);
		ts_info->port = rdev->port;
		ts_info->tag = rdev->ts_tag;
		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);

		skb_tx_timestamp(skb);
	}

	dma_wmb();

	desc->desc.die_dt = DT_FSINGLE | DIE;
	wmb();	/* gq->cur must be incremented after die_dt was set */

	gq->cur = rswitch_next_queue_index(gq, true, 1);
	rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));

	return ret;
}

static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
	return &ndev->stats;
}

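/* SIOCGHWTSTAMP/SIOCSHWTSTAMP handlers: translate between the generic
 * hwtstamp_config and the gPTP block's tstamp_tx_ctrl/tstamp_rx_ctrl bits.
 */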
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rcar_gen4_ptp_private *ptp_priv;
	struct hwtstamp_config config;

	ptp_priv = rdev->priv->ptp_priv;

	config.flags = 0;
	config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						    HWTSTAMP_TX_OFF;
	switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
	case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
	struct hwtstamp_config config;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
		break;
	}

	rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}

static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;
}

static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	of_node_put(rdev->np_port);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	if (rdev->priv->gwca.speed < rdev->etha->speed)
		rdev->priv->gwca.speed = rdev->etha->speed;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

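/* One-time bring-up: the ETHA/COMA/TOP blocks, link-fix table, timestamp
 * ring and per-port netdevs are set up before the forwarding engine, PTP
 * clock, IRQs and GWCA; the error labels unwind in reverse order.
 */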
static int rswitch_init(struct rswitch_private *priv)
{
	int i, err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (i--; i >= 0; i--)
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
				     RCAR_GEN4_PTP_CLOCK_S4);
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

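/* Probe: map the "secure_base" register window, prefer a 40-bit DMA mask
 * with a 32-bit fallback, and keep the device runtime-resumed for the
 * lifetime of the driver.
 */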
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		struct rswitch_device *rdev = priv->rdev[i];

		phy_exit(priv->rdev[i]->serdes);
		rswitch_ether_port_deinit_one(rdev);
		unregister_netdev(rdev->ndev);
		rswitch_device_free(priv, i);
	}

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static int renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");