net: lantiq_xrx200: Hardcode the burst length value
drivers/net/ethernet/lantiq_xrx200.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN	(SZ_64K - 1)
#define XRX200_DMA_RX		0
#define XRX200_DMA_TX		1
#define XRX200_DMA_BURST_LEN	8
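
/* The DMA engine moves data in bursts of XRX200_DMA_BURST_LEN 32-bit
 * words (8 * 4 = 32 bytes); xrx200_start_xmit() below rounds TX buffer
 * addresses down to this burst size and carries the remainder as a byte
 * offset in the descriptor.
 */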

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
			     ETH_FCS_LEN);
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate the receiving of frames
	 * on the ports and then some packets queue up in the PPE buffers.
	 * They already passed the PMAC so they do not have the tags
	 * configured here. Read these packets here and drop them.
	 * The HW should have written them into memory after 10us
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	dma_addr_t mapping;
	int ret = 0;

	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
							  len);
	if (!ch->skb[ch->dma.desc]) {
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
				 len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
		ch->skb[ch->dma.desc] = skb;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping;
	/* Make sure the address is written before we give it to HW */
	wmb();

skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;

	return ret;
}
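
/* Hand one received frame to the stack and re-arm its descriptor with a
 * fresh buffer via xrx200_alloc_skb().
 */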
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	int ret;

	ret = xrx200_alloc_skb(ch);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, net_dev);
	netif_receive_skb(skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += len - ETH_FCS_LEN;

	return 0;
}
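
/* NAPI RX poll function; the RX interrupt is only re-enabled once the
 * budget was not exhausted and NAPI polling completes.
 */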
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret)
				return ret;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}
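
/* Reclaim completed TX descriptors, update the queue accounting and wake
 * the queue if it was stopped because the ring was full.
 */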
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);
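	/* e.g. a mapping of 0x1234567a yields a byte_offset of 0x1a: the
	 * descriptor then points at the 32-byte boundary 0x12345660 and
	 * LTQ_DMA_TX_OFFSET() tells the hardware to skip the first 0x1a
	 * bytes.
	 */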

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
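
/* RX buffers are sized from the current MTU, so growing the MTU requires
 * reallocating every buffer in the RX ring; shrinking it does not.
 */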
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	struct sk_buff *skb;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		skb = ch_rx->skb[ch_rx->dma.desc];
		ret = xrx200_alloc_skb(ch_rx);
		if (ret) {
			net_dev->mtu = old_mtu;
			break;
		}
		dev_kfree_skb_any(skb);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_change_mtu		= xrx200_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
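
/* Shared RX/TX channel interrupt handler: mask the channel interrupt,
 * defer the actual work to NAPI and acknowledge the interrupt.
 */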
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}
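
/* Set up both DMA channels: allocate the descriptor rings, pre-fill the
 * RX ring with buffers and request the RX/TX channel interrupts.
 */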
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_skb(ch_rx);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			dev_kfree_skb_any(priv->chan_rx.skb[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		dev_kfree_skb_any(priv->chan_rx.skb[i]);
}
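
/* Probe: map the PMAC registers, fetch the DMA IRQs and the clock, bring
 * up the DMA rings and register the net_device.
 */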
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
	netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");