net: stmmac: xgmac: Fix RSS writing wrong keys
[linux-2.6-block.git] drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_ptp.h"
#include "dwxgmac2.h"

static void dwxgmac2_core_init(struct mac_device_info *hw,
			       struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 tx, rx;

	tx = readl(ioaddr + XGMAC_TX_CONFIG);
	rx = readl(ioaddr + XGMAC_RX_CONFIG);

	tx |= XGMAC_CORE_INIT_TX;
	rx |= XGMAC_CORE_INIT_RX;

	if (hw->ps) {
		tx |= XGMAC_CONFIG_TE;
		tx &= ~hw->link.speed_mask;

		switch (hw->ps) {
		case SPEED_10000:
			tx |= hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			tx |= hw->link.speed2500;
			break;
		case SPEED_1000:
		default:
			tx |= hw->link.speed1000;
			break;
		}
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}

static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx |= XGMAC_CONFIG_TE;
		rx |= XGMAC_CONFIG_RE;
	} else {
		tx &= ~XGMAC_CONFIG_TE;
		rx &= ~XGMAC_CONFIG_RE;
	}

	writel(tx, ioaddr + XGMAC_TX_CONFIG);
	writel(rx, ioaddr + XGMAC_RX_CONFIG);
}

static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (hw->rx_csum)
		value |= XGMAC_CONFIG_IPC;
	else
		value &= ~XGMAC_CONFIG_IPC;
	writel(value, ioaddr + XGMAC_RX_CONFIG);

	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
}

static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
}

static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSRQ(queue);
	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_PSTC(queue);
	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_RAA;

	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= XGMAC_RAA;
		break;
	default:
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);
}

static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool ets = true;
	u32 value;
	int i;

	value = readl(ioaddr + XGMAC_MTL_OPMODE);
	value &= ~XGMAC_ETSALG;

	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= XGMAC_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= XGMAC_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= XGMAC_DWRR;
		break;
	default:
		ets = false;
		break;
	}

	writel(value, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
		value &= ~XGMAC_TSA;
		if (ets)
			value |= XGMAC_ETS;
		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
	}
}

static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}

static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, reg;

	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + reg);
	value &= ~XGMAC_QxMDMACH(queue);
	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);

	writel(value, ioaddr + reg);
}

static void dwxgmac2_config_cbs(struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	value |= XGMAC_CC | XGMAC_CBS;
	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}

static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}

static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	int ret = 0;
	u32 status;

	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
	if (status & BIT(chan)) {
		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));

		if (chan_status & XGMAC_RXOVFIS)
			ret |= CORE_IRQ_MTL_RX_OVERFLOW;

		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	}

	return ret;
}

static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 i;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
	if (fc & FLOW_TX) {
		for (i = 0; i < tx_cnt; i++) {
			u32 value = XGMAC_TFE;

			if (duplex)
				value |= pause_time << XGMAC_PT_SHIFT;

			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
		}
	}
}

static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	writel(val, ioaddr + XGMAC_PMT);
}

static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (addr[5] << 8) | addr[4];
	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));

	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}

static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
				   unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 hi_addr, lo_addr;

	/* Read the MAC address from the hardware */
	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));

	/* Extract the MAC address from the high and low words */
	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);

	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		value |= XGMAC_TXCGE;

	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_LPI_CTRL);
	if (link)
		value |= XGMAC_PLS;
	else
		value &= ~XGMAC_PLS;
	writel(value, ioaddr + XGMAC_LPI_CTRL);
}

static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
}

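/* Program the multicast hash filter registers. The register count follows
 * the configured hash width: a 6-bit hash (64 bins) uses 2 registers, a
 * 7-bit hash uses 4 and an 8-bit hash uses 8; any other width leaves the
 * table untouched.
 */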
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int numhashregs, regs;

	switch (mcbitslog2) {
	case 6:
		numhashregs = 2;
		break;
	case 7:
		numhashregs = 4;
		break;
	case 8:
		numhashregs = 8;
		break;
	default:
		return;
	}

	for (regs = 0; regs < numhashregs; regs++)
		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
}

static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}

static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		value |= XGMAC_CONFIG_LM;
	else
		value &= ~XGMAC_CONFIG_LM;

	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

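/* Indirect write into the RSS block: the 32-bit value is written to
 * XGMAC_RSS_DATA, then the target slot (hash key word or indirection
 * table entry, selected by is_key) plus XGMAC_OB are written to
 * XGMAC_RSS_ADDR, and the helper polls until the hardware clears XGMAC_OB.
 */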
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl = 0;

	writel(val, ioaddr + XGMAC_RSS_DATA);
	ctrl |= idx << XGMAC_RSSIA_SHIFT;
	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
	ctrl |= XGMAC_OB;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}

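/* Configure RSS. When disabled, only XGMAC_RSSE is cleared. Otherwise the
 * hash key is programmed one 32-bit word at a time, then the indirection
 * table, every RX queue is mapped through dwxgmac2_map_mtl_to_dma() with
 * XGMAC_QDDMACH, and the hash-type and enable bits are set last.
 */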
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}

static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}

struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)

static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
			   "correctable" : "uncorrectable", module_name,
			   desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}

static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};

static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}

static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};

static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 value;

	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, value, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}

static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}

static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}

static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};

static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
				     int index, unsigned long *count,
				     const char **desc)
{
	int module = index / 32, offset = index % 32;
	unsigned long *ptr = (unsigned long *)stats;

	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
		return -EINVAL;
	if (!dwxgmac3_all_errors[module].desc[offset].valid)
		return -EINVAL;
	if (count)
		*count = *(ptr + index);
	if (desc)
		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
	return 0;
}

static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);

	val &= ~XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}

static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 val;

	val = readl(ioaddr + XGMAC_MTL_OPMODE);
	val |= XGMAC_FRPE;
	writel(val, ioaddr + XGMAC_MTL_OPMODE);
}

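/* Write one entry into the Flexible RX Parser instruction memory. Each
 * 32-bit word of entry->val goes through the indirect access registers:
 * wait for XGMAC_STARTBUSY to clear, write the data word, program the
 * target address and XGMAC_WRRDN, kick XGMAC_STARTBUSY and wait for the
 * transfer to complete.
 */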
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}

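/* Pick the next parser entry to program: the lowest-priority in-use entry
 * with prio >= curr_prio that is not yet in hardware, skipping fragments
 * and the last (all-pass) entry, which the caller handles separately.
 */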
static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
			    unsigned int count, u32 curr_prio)
{
	struct stmmac_tc_entry *entry;
	u32 min_prio = ~0x0;
	int i, min_prio_idx;
	bool found = false;

	for (i = count - 1; i >= 0; i--) {
		entry = &entries[i];

		/* Do not update unused entries */
		if (!entry->in_use)
			continue;
		/* Do not update already updated entries (i.e. fragments) */
		if (entry->in_hw)
			continue;
		/* Let last entry be updated last */
		if (entry->is_last)
			continue;
		/* Do not return fragments */
		if (entry->is_frag)
			continue;
		/* Check if we already checked this prio */
		if (entry->prio < curr_prio)
			continue;
		/* Check if this is the minimum prio */
		if (entry->prio < min_prio) {
			min_prio = entry->prio;
			min_prio_idx = i;
			found = true;
		}
	}

	if (found)
		return &entries[min_prio_idx];
	return NULL;
}

static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}

static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}

static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}

static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);

	value &= ~XGMAC_CONFIG_SARC;
	value |= val << XGMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + XGMAC_TX_CONFIG);
}

static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + XGMAC_VLAN_INCL);
	value |= XGMAC_VLAN_VLTI;
	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~XGMAC_VLAN_VLC;
	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
	writel(value, ioaddr + XGMAC_VLAN_INCL);
}

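/* The L3/L4 filter registers are not mapped directly; they are reached
 * through the XGMAC_L3L4_ADDR_CTRL / XGMAC_L3L4_DATA pair, with XGMAC_XB
 * used as the busy bit that dwxgmac2_filter_wait() polls between accesses.
 */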
static int dwxgmac2_filter_wait(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
			       !(value & XGMAC_XB), 100, 10000))
		return -EBUSY;
	return 0;
}

static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}

static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}

static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}

static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	value = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		value |= XGMAC_CONFIG_ARPEN;
	else
		value &= ~XGMAC_CONFIG_ARPEN;
	writel(value, ioaddr + XGMAC_RX_CONFIG);
}

const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
};

int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}