/*
 * drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
 * (snapshot extracted from a git web view; tip commit:
 *  "net: stmmac: Do not disable interrupts when cleaning TX")
 */
1// SPDX-License-Identifier: (GPL-2.0 OR MIT)
2/*
3 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4 * stmmac XGMAC support.
5 */
6
7#include <linux/iopoll.h>
8#include "stmmac.h"
9#include "dwxgmac2.h"
10
11static int dwxgmac2_dma_reset(void __iomem *ioaddr)
12{
13 u32 value = readl(ioaddr + XGMAC_DMA_MODE);
14
15 /* DMA SW reset */
16 writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
17
18 return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
19 !(value & XGMAC_SWR), 0, 100000);
20}
21
22static void dwxgmac2_dma_init(void __iomem *ioaddr,
23 struct stmmac_dma_cfg *dma_cfg, int atds)
24{
25 u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
26
27 if (dma_cfg->aal)
28 value |= XGMAC_AAL;
29
30 writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
31}
32
33static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
34 struct stmmac_dma_cfg *dma_cfg, u32 chan)
35{
36 u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
37
38 if (dma_cfg->pblx8)
39 value |= XGMAC_PBLx8;
40
41 writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
42 writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
43}
44
45static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
46 struct stmmac_dma_cfg *dma_cfg,
47 u32 dma_rx_phy, u32 chan)
48{
49 u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
50 u32 value;
51
52 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
53 value &= ~XGMAC_RxPBL;
54 value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
55 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
56
57 writel(dma_rx_phy, ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
58}
59
60static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
61 struct stmmac_dma_cfg *dma_cfg,
62 u32 dma_tx_phy, u32 chan)
63{
64 u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
65 u32 value;
66
67 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
68 value &= ~XGMAC_TxPBL;
69 value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
70 value |= XGMAC_OSP;
71 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
72
73 writel(dma_tx_phy, ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
74}
75
76static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
77{
78 u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
79 int i;
80
81 if (axi->axi_lpi_en)
82 value |= XGMAC_EN_LPI;
83 if (axi->axi_xit_frm)
84 value |= XGMAC_LPI_XIT_PKT;
85
86 value &= ~XGMAC_WR_OSR_LMT;
87 value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
88 XGMAC_WR_OSR_LMT;
89
90 value &= ~XGMAC_RD_OSR_LMT;
91 value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
92 XGMAC_RD_OSR_LMT;
93
900a81cc
JA
94 if (!axi->axi_fb)
95 value |= XGMAC_UNDEF;
96
d6ddfacd
JA
97 value &= ~XGMAC_BLEN;
98 for (i = 0; i < AXI_BLEN; i++) {
d6ddfacd
JA
99 switch (axi->axi_blen[i]) {
100 case 256:
101 value |= XGMAC_BLEN256;
102 break;
103 case 128:
104 value |= XGMAC_BLEN128;
105 break;
106 case 64:
107 value |= XGMAC_BLEN64;
108 break;
109 case 32:
110 value |= XGMAC_BLEN32;
111 break;
112 case 16:
113 value |= XGMAC_BLEN16;
114 break;
115 case 8:
116 value |= XGMAC_BLEN8;
117 break;
118 case 4:
119 value |= XGMAC_BLEN4;
120 break;
121 }
122 }
123
124 writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
8fe82bd4
JA
125 writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
126 writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
d6ddfacd
JA
127}
128
/* Configure the MTL RX queue operating mode for @channel: store-and-forward
 * vs. threshold mode, the queue size, and — when the FIFO is large enough
 * and the queue is not AVB — hardware flow control thresholds.  Finally the
 * RX overflow interrupt for the queue is enabled.
 */
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
				 u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
	/* RQS encodes the queue size in 256-byte units, minus one */
	unsigned int rqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_RSF;
	} else {
		value &= ~XGMAC_RSF;
		value &= ~XGMAC_RTC;

		/* Map the requested threshold (bytes) to the RTC encoding */
		if (mode <= 64)
			value |= 0x0 << XGMAC_RTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_RTC_SHIFT;
		else
			value |= 0x3 << XGMAC_RTC_SHIFT;
	}

	value &= ~XGMAC_RQS;
	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;

	/* Hardware flow control needs at least a 4K FIFO and is skipped for
	 * AVB queues.
	 */
	if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
		u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
		unsigned int rfd, rfa;

		value |= XGMAC_EHFC;

		/* Set Threshold for Activating Flow Control to min 2 frames,
		 * i.e. 1500 * 2 = 3000 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to min 1 frame,
		 * i.e. 1500 bytes.
		 */
		switch (fifosz) {
		case 4096:
			/* This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x03; /* Full-2.5K */
			rfa = 0x01; /* Full-1.5K */
			break;

		case 8192:
			rfd = 0x06; /* Full-4K */
			rfa = 0x0a; /* Full-6K */
			break;

		case 16384:
			rfd = 0x06; /* Full-4K */
			rfa = 0x12; /* Full-10K */
			break;

		default:
			rfd = 0x06; /* Full-4K */
			rfa = 0x1e; /* Full-16K */
			break;
		}

		flow &= ~XGMAC_RFD;
		flow |= rfd << XGMAC_RFD_SHIFT;

		flow &= ~XGMAC_RFA;
		flow |= rfa << XGMAC_RFA_SHIFT;

		writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
	}

	writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));

	/* Enable the MTL RX queue overflow interrupt */
	value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
	writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
204
/* Configure the MTL TX queue operating mode for @channel: store-and-forward
 * vs. threshold mode, static TC-to-queue mapping, the queue-enable type
 * (generic vs. AVB) and the queue size.
 */
static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
				 u32 channel, int fifosz, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
	/* TQS encodes the queue size in 256-byte units, minus one */
	unsigned int tqs = fifosz / 256 - 1;

	if (mode == SF_DMA_MODE) {
		value |= XGMAC_TSF;
	} else {
		value &= ~XGMAC_TSF;
		value &= ~XGMAC_TTC;

		/* Map the requested threshold (bytes) to the TTC encoding */
		if (mode <= 64)
			value |= 0x0 << XGMAC_TTC_SHIFT;
		else if (mode <= 96)
			value |= 0x2 << XGMAC_TTC_SHIFT;
		else if (mode <= 128)
			value |= 0x3 << XGMAC_TTC_SHIFT;
		else if (mode <= 192)
			value |= 0x4 << XGMAC_TTC_SHIFT;
		else if (mode <= 256)
			value |= 0x5 << XGMAC_TTC_SHIFT;
		else if (mode <= 384)
			value |= 0x6 << XGMAC_TTC_SHIFT;
		else
			value |= 0x7 << XGMAC_TTC_SHIFT;
	}

	/* Use static TC to Queue mapping */
	value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;

	/* TXQEN: 0x2 for generic (non-AVB) queues, 0x1 for AVB queues */
	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB)
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
	else
		value |= 0x1 << XGMAC_TXQEN_SHIFT;

	value &= ~XGMAC_TQS;
	value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;

	writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
247
/* Unmask the default set of DMA interrupts for channel @chan. */
static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan)
{
	writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
252
/* Mask all DMA interrupts for channel @chan. */
static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan)
{
	writel(0, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
}
257
258static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
259{
260 u32 value;
261
262 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
263 value |= XGMAC_TXST;
264 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
265
266 value = readl(ioaddr + XGMAC_TX_CONFIG);
267 value |= XGMAC_CONFIG_TE;
268 writel(value, ioaddr + XGMAC_TX_CONFIG);
269}
270
271static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
272{
273 u32 value;
274
275 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
276 value &= ~XGMAC_TXST;
277 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
278
279 value = readl(ioaddr + XGMAC_TX_CONFIG);
280 value &= ~XGMAC_CONFIG_TE;
281 writel(value, ioaddr + XGMAC_TX_CONFIG);
282}
283
284static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
285{
286 u32 value;
287
288 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
289 value |= XGMAC_RXST;
290 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
291
292 value = readl(ioaddr + XGMAC_RX_CONFIG);
293 value |= XGMAC_CONFIG_RE;
294 writel(value, ioaddr + XGMAC_RX_CONFIG);
295}
296
297static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
298{
299 u32 value;
300
301 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
302 value &= ~XGMAC_RXST;
303 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
d6ddfacd
JA
304}
305
/* Service the per-channel DMA interrupt: update the driver stats in @x,
 * translate the status bits into handle_rx/handle_tx/tx_hard_error flags
 * for the caller, and acknowledge the serviced interrupts.
 */
static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
				  struct stmmac_extra_stats *x, u32 chan)
{
	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
	int ret = 0;

	/* ABNORMAL interrupts */
	if (unlikely(intr_status & XGMAC_AIS)) {
		/* TX process stopped */
		if (unlikely(intr_status & XGMAC_TPS)) {
			x->tx_process_stopped_irq++;
			ret |= tx_hard_error;
		}
		/* Fatal bus error */
		if (unlikely(intr_status & XGMAC_FBE)) {
			x->fatal_bus_error_irq++;
			ret |= tx_hard_error;
		}
	}

	/* TX/RX NORMAL interrupts */
	if (likely(intr_status & XGMAC_NIS)) {
		x->normal_irq_n++;

		/* RX complete */
		if (likely(intr_status & XGMAC_RI)) {
			x->rx_normal_irq_n++;
			ret |= handle_rx;
		}
		/* TX complete or TX buffer unavailable */
		if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
			x->tx_normal_irq_n++;
			ret |= handle_tx;
		}
	}

	/* Clear (ack) only the interrupt bits that are currently enabled */
	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));

	return ret;
}
344
/* Read the XGMAC HW feature registers and fill @dma_cap with the
 * capabilities advertised by this core instance.
 */
static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
				    struct dma_features *dma_cap)
{
	u32 hw_cap;

	/* MAC HW feature 0 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
	dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
	dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
	dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
	/* AV is only reported when both the TX (AVSEL) and RX (RAVSEL)
	 * sides support it.
	 */
	dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
	dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
	dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
	dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
	dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;

	/* MAC HW feature 1 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
	dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
	/* FIFO sizes are encoded as log2(bytes / 128) */
	dma_cap->tx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
	dma_cap->rx_fifo_size =
		128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);

	/* MAC HW feature 2 */
	hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
	dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
	/* Channel/queue counts are stored by hardware as count - 1 */
	dma_cap->number_tx_channel =
		((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
	dma_cap->number_rx_channel =
		((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
	dma_cap->number_tx_queues =
		((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
	dma_cap->number_rx_queues =
		((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
}
381
382static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
383{
384 u32 i;
385
386 for (i = 0; i < nchan; i++)
387 writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(i));
388}
389
/* Program the RX descriptor ring length register for channel @chan. */
static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
}
394
/* Program the TX descriptor ring length register for channel @chan. */
static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
{
	writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
}
399
/* Update the RX descriptor tail pointer for channel @chan. */
static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
}
404
/* Update the TX descriptor tail pointer for channel @chan. */
static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
{
	writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
}
409
410static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
411{
412 u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
413
414 if (en)
415 value |= XGMAC_TSE;
416 else
417 value &= ~XGMAC_TSE;
418
419 writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
420}
421
ec6ea8e3
JA
/* Switch the TX queue @channel between generic (non-AVB) and AVB mode at
 * runtime by reprogramming the TXQEN field; leaving AVB mode also resets
 * the TC ETS control register.
 */
static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
{
	u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));

	value &= ~XGMAC_TXQEN;
	if (qmode != MTL_QUEUE_AVB) {
		/* TXQEN = 0x2: generic queue; clear the TC ETS control */
		value |= 0x2 << XGMAC_TXQEN_SHIFT;
		writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
	} else {
		/* TXQEN = 0x1: AVB queue */
		value |= 0x1 << XGMAC_TXQEN_SHIFT;
	}

	writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
}
436
d6ddfacd
JA
437static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
438{
439 u32 value;
440
441 value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
442 value |= bfsize << 1;
443 writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
444}
445
/* DMA callback table for the DesignWare XGMAC 2.10 core, plugged into the
 * common stmmac framework via struct stmmac_dma_ops.
 */
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
	.reset = dwxgmac2_dma_reset,
	.init = dwxgmac2_dma_init,
	.init_chan = dwxgmac2_dma_init_chan,
	.init_rx_chan = dwxgmac2_dma_init_rx_chan,
	.init_tx_chan = dwxgmac2_dma_init_tx_chan,
	.axi = dwxgmac2_dma_axi,
	.dump_regs = NULL, /* register dump not implemented for this core */
	.dma_rx_mode = dwxgmac2_dma_rx_mode,
	.dma_tx_mode = dwxgmac2_dma_tx_mode,
	.enable_dma_irq = dwxgmac2_enable_dma_irq,
	.disable_dma_irq = dwxgmac2_disable_dma_irq,
	.start_tx = dwxgmac2_dma_start_tx,
	.stop_tx = dwxgmac2_dma_stop_tx,
	.start_rx = dwxgmac2_dma_start_rx,
	.stop_rx = dwxgmac2_dma_stop_rx,
	.dma_interrupt = dwxgmac2_dma_interrupt,
	.get_hw_feature = dwxgmac2_get_hw_feature,
	.rx_watchdog = dwxgmac2_rx_watchdog,
	.set_rx_ring_len = dwxgmac2_set_rx_ring_len,
	.set_tx_ring_len = dwxgmac2_set_tx_ring_len,
	.set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
	.set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
	.enable_tso = dwxgmac2_enable_tso,
	.qmode = dwxgmac2_qmode,
	.set_bfsize = dwxgmac2_set_bfsize,
};