net: stmmac: xgmac: Fix RSS not writing all Keys to HW
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
#include "dwxgmac2.h"

static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	} else {
		tx &= ~XGMAC_CONFIG_TE;
		rx &= ~XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (hw->rx_csum)
		value |= XGMAC_CONFIG_IPC;
	else
		value &= ~XGMAC_CONFIG_IPC;
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

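/* Queues 0-3 map their priorities through XGMAC_RXQ_CTRL2 and queues 4-7
 * through XGMAC_RXQ_CTRL3; the TX traffic-class priority maps below use
 * the same 4-queues-per-register split.
 */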
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= XGMAC_RAA;
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

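/* Decode MAC-level interrupts: PMT wake-up events and LPI (EEE) entry/exit
 * notifications for the TX and RX paths.
 */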
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}

static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (addr[5] << 8) | addr[4];
	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

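/* Energy Efficient Ethernet (EEE): LPI entry/exit control and timers */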
static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);

	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		value |= XGMAC_TXCGE;

	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	if (link)
		value |= XGMAC_PLS;
	else
		value &= ~XGMAC_PLS;
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
}

static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, regs;

	switch (mcbitslog2) {
	case 6:
		numhashregs = 2;
		break;
	case 7:
		numhashregs = 4;
		break;
	case 8:
		numhashregs = 8;
		break;
	default:
		return;
	}

	for (regs = 0; regs < numhashregs; regs++)
		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

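/* Program the RX packet filter: promiscuous mode, the multicast hash table
 * and the perfect unicast address registers, falling back to promiscuous or
 * pass-all-multicast when the available entries are exceeded.
 */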
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		value |= XGMAC_CONFIG_LM;
	else
		value &= ~XGMAC_CONFIG_LM;

	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

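/* The RSS hash key and indirection table are programmed one 32-bit word at
 * a time through the indirect XGMAC_RSS_ADDR/XGMAC_RSS_DATA pair. The key
 * loop in dwxgmac2_rss_configure() walks every 32-bit word of cfg->key so
 * the complete key reaches the hardware, which is the point of the
 * "Fix RSS not writing all Keys to HW" change.
 */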
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}

static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	int i, ret;
	u32 value;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}

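/* Enable or disable VLAN hash filtering; a zero hash clears VTFE and the
 * VLAN tag matching configuration.
 */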
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}

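/* The dwxgmac3_* helpers below decode and report the safety features:
 * ECC, parity and FSM timeout errors in the MAC, MTL and DMA blocks.
 */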
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field)	offsetof(struct stmmac_safety_stats, field)

static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
			   "correctable" : "uncorrectable", module_name,
			   desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}

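/* Flexible RX Parser (FRP): instruction entries are written through the
 * indirect XGMAC_MTL_RXP_IACC_CTRL_ST/DATA registers while the RX path is
 * temporarily disabled.
 */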
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

	val &= ~XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + XGMAC_MTL_OPMODE);
	val |= XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}

static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}

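/* Program one of the flexible PPS outputs: the pulse interval and width
 * are derived from the requested period and the PTP sub-second increment.
 */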
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}

static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);

	value &= ~XGMAC_CONFIG_SARC;
	value |= val << XGMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_VLAN_INCL);
	value |= XGMAC_VLAN_VLTI;
	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~XGMAC_VLAN_VLC;
	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
	writel(value, ioaddr + XGMAC_VLAN_INCL);
}

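/* The L3/L4 filters are accessed indirectly through XGMAC_L3L4_ADDR_CTRL
 * and XGMAC_L3L4_DATA, polling the XB (busy) bit around each access.
 */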
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
			       !(value & XGMAC_XB), 100, 10000))
		return -EBUSY;
	return 0;
}

static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}

static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}

static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		value |= XGMAC_CONFIG_ARPEN;
	else
		value &= ~XGMAC_CONFIG_ARPEN;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

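/* XGMAC 2.10 implementation of the common stmmac MAC operations */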
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
};

int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}