drivers: net: xgene: Protect indirect MAC access
[linux-2.6-block.git] / drivers / net / ethernet / apm / xgene / xgene_enet_hw.c
CommitLineData
e6ad7673
IS
1/* Applied Micro X-Gene SoC Ethernet Driver
2 *
3 * Copyright (c) 2014, Applied Micro Circuits Corporation
4 * Authors: Iyappan Subramanian <isubramanian@apm.com>
5 * Ravi Patel <rapatel@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "xgene_enet_main.h"
23#include "xgene_enet_hw.h"
24
/* Populate the ring's shadow CSR state words with threshold select,
 * error-accept and coherency flags, the DMA base address (split across
 * two state words) and the encoded ring size.
 */
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	/* hardware takes the base address in 256-byte units */
	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
44
/* Mark the ring as a buffer pool or a regular work queue in the shadow
 * state; buffer pools additionally get the bufpool ring mode.
 */
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}
61
/* Enable the recombination buffer for the ring and program its timeout
 * (low nibble in state word 3, high bits in word 4).
 */
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}
71
72static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
73 u32 offset, u32 data)
74{
75 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
76
77 iowrite32(data, pdata->ring_csr_addr + offset);
78}
79
80static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
81 u32 offset, u32 *data)
82{
83 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
84
85 *data = ioread32(pdata->ring_csr_addr + offset);
86}
87
/* Flush the shadow ring state into hardware: select the ring via
 * CSR_RING_CONFIG, then write each state word to the WR window.
 */
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}
99
/* Zero the shadow state and write it out, clearing the ring in HW. */
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}
105
/* Build the full shadow state (type, optional recombination buffer for
 * ETH-owned rings, base config) and commit it to hardware.
 */
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}
117
/* Map the ring id to its ring number in the ring manager and enable the
 * prefetch buffer; buffer-pool rings are flagged as such.
 */
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}
136
/* Remove the ring id -> ring number association in the ring manager. */
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
145
/* Fully initialize a descriptor ring: clear and program its HW state,
 * associate the id, and compute the slot count.  For CPU-owned
 * (completion) rings, additionally mark every descriptor slot empty and
 * enable the ring's "not empty" interrupt mode bit.
 *
 * Returns the ring that was passed in.
 */
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
172
/* Tear down a descriptor ring: for CPU-owned rings first disable its
 * "not empty" interrupt mode bit, then clear the id association and the
 * ring state.
 */
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}
190
/* Post @count descriptors/buffers to the ring's command register.
 * A negative count returns buffers (ring-1 command semantics).
 */
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}
195
/* Return the number of messages currently queued in the ring, read from
 * the NUMMSGSINQ field of the ring state register.
 */
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}
206
e6ad7673
IS
207void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
208 struct xgene_enet_pdata *pdata,
209 enum xgene_enet_err_code status)
210{
e6ad7673
IS
211 switch (status) {
212 case INGRESS_CRC:
3bb502f8
IS
213 ring->rx_crc_errors++;
214 ring->rx_dropped++;
e6ad7673
IS
215 break;
216 case INGRESS_CHECKSUM:
217 case INGRESS_CHECKSUM_COMPUTE:
3bb502f8
IS
218 ring->rx_errors++;
219 ring->rx_dropped++;
e6ad7673
IS
220 break;
221 case INGRESS_TRUNC_FRAME:
3bb502f8
IS
222 ring->rx_frame_errors++;
223 ring->rx_dropped++;
e6ad7673
IS
224 break;
225 case INGRESS_PKT_LEN:
3bb502f8
IS
226 ring->rx_length_errors++;
227 ring->rx_dropped++;
e6ad7673
IS
228 break;
229 case INGRESS_PKT_UNDER:
3bb502f8
IS
230 ring->rx_frame_errors++;
231 ring->rx_dropped++;
e6ad7673
IS
232 break;
233 case INGRESS_FIFO_OVERRUN:
3bb502f8 234 ring->rx_fifo_errors++;
e6ad7673
IS
235 break;
236 default:
237 break;
238 }
239}
240
/* Write a value to an Ethernet CSR register at @offset. */
static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}
248
/* Write a value to a ring-interface CSR register at @offset. */
static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}
256
/* Write a value to a diagnostic CSR register at @offset. */
static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}
264
/* Write a value to a MCX MAC CSR register at @offset. */
static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}
272
/* Indirect write to a MAC register.
 *
 * The MAC register file is reached through an address/data/command
 * window: latch @wr_addr and @wr_data, issue the write command, then
 * poll the command-done register (up to ~10us) before releasing the
 * command.  The whole sequence is serialized by pdata->mac_lock since
 * the window is shared (e.g. with set_speed and the flow-control paths).
 */
void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;
	struct net_device *ndev = pdata->ndev;
	u8 wait = 10;
	u32 done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
			   wr_addr, wr_data);

	/* clear the command to re-arm the window for the next access */
	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);
}
300
/* Read a value from an Ethernet CSR register at @offset. */
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}
308
/* Read a value from a diagnostic CSR register at @offset. */
static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}
316
/* Read a value from a MCX MAC CSR register at @offset. */
static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}
324
/* Indirect read of a MAC register, mirror of xgene_enet_wr_mac().
 *
 * Latch @rd_addr, issue the read command, poll command-done (up to
 * ~10us), fetch the data word, then release the command.  Serialized by
 * pdata->mac_lock.  On timeout an error is logged and whatever the data
 * register holds is returned.
 */
u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	u32 done, rd_data;
	u8 wait = 10;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(pdata->ndev, "mac read failed, addr: %04x\n",
			   rd_addr);

	rd_data = ioread32(rd);
	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);

	return rd_data;
}
353
/* Program the station MAC address into the two STATION_ADDR registers.
 * Bytes 0-3 of dev_addr go into ADDR0 (byte 0 in the low byte), bytes
 * 4-5 into the top half of ADDR1.
 */
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}
366
/* Release the port's internal memories from shutdown and wait (up to
 * ~10 * 100us) for all blocks to report ready (0xffffffff).
 *
 * Return: 0 on success, -ENODEV if the memories never become ready.
 */
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
386
/* Pulse the MAC soft-reset bit: assert then deassert via MAC_CONFIG_1. */
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
392
/* Set the port reference clock for the current PHY speed.
 *
 * On device-tree systems the parent clock rate is programmed directly
 * (2.5/25/125 MHz for 10/100/1000).  On ACPI systems the corresponding
 * S10/S100/S1G method is evaluated instead.
 */
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}
431
/* Reconfigure MAC, interface-control, ICM and RGMII registers for the
 * current pdata->phy_speed (10/100/default-1G).
 *
 * Per speed this selects the interface mode, half/full GHz/LHz duplex
 * mode bits, the ICM MAC mode and async-read wait count, and the RGMII
 * 1250 speed bit.  At gigabit, DT-provided TX/RX clock delays are also
 * applied and the unisec bypass bits are set in the debug register.
 * Finally the reference clock is retuned via
 * xgene_enet_configure_clock().
 */
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			/* board-specific RGMII clock delays from DT */
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
	xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}
e6ad7673 487
/* Program the maximum accepted frame length into the MAC. */
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}
492
/* Enable or disable automatic transmission of pause frames by toggling
 * the DPF auto-control and XON-enable bits in the ECM config register.
 */
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}
507
/* Enable or disable TX flow control in MAC_CONFIG_1, then mirror the
 * setting into the pause-frame generator via the mac_ops hook.
 */
static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}
523
/* Enable or disable RX flow control (honoring received pause frames)
 * via the RX_FLOW_EN bit of MAC_CONFIG_1.
 */
static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}
537
/* One-time GMAC bring-up: reset (unless an external MDIO driver owns
 * the MAC), speed/address programming, MDC clock, bufpool-timeout drop,
 * HW pause-frame thresholds, flow-control state, and finally resume of
 * RX/TX traffic gating.
 */
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation: quanta in the upper half,
	 * preserve the lower half of the DPF register.
	 */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
583
/* Associate all work-queue and free-pool queues with this ring
 * interface, for both the main QM and QM-lite.
 */
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
593
/* Configure classifier-engine bypass: route all ingress traffic to
 * @dst_ring_num, drawing buffers from @bufpool_id with @nxtbufpool_id
 * as the next (jumbo/page) pool.
 */
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}
616
/* Set the RX enable bit in MAC_CONFIG_1 (read-modify-write). */
static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}
624
/* Set the TX enable bit in MAC_CONFIG_1 (read-modify-write). */
static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}
632
/* Clear the RX enable bit in MAC_CONFIG_1 (read-modify-write). */
static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}
640
/* Clear the TX enable bit in MAC_CONFIG_1 (read-modify-write). */
static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
648
/* Check that the ring manager is usable: its clock must be enabled and
 * it must be out of reset.  Returns true when ready.
 */
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}
659
660static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
e6ad7673 661{
bc61167a 662 struct device *dev = &pdata->pdev->dev;
e6ad7673 663
c3f4465d
IS
664 if (!xgene_ring_mgr_init(pdata))
665 return -ENODEV;
666
8089a96f
IS
667 if (pdata->mdio_driver) {
668 xgene_enet_config_ring_if_assoc(pdata);
669 return 0;
670 }
671
bc61167a 672 if (dev->of_node) {
de7b5b3d 673 clk_prepare_enable(pdata->clk);
bc61167a 674 udelay(5);
de7b5b3d 675 clk_disable_unprepare(pdata->clk);
bc61167a 676 udelay(5);
de7b5b3d 677 clk_prepare_enable(pdata->clk);
bc61167a
IS
678 udelay(5);
679 } else {
680#ifdef CONFIG_ACPI
681 if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
682 acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
683 "_RST", NULL, NULL);
684 } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
685 "_INI")) {
686 acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
687 "_INI", NULL, NULL);
688 }
689#endif
de7b5b3d 690 }
e6ad7673 691
bc61167a
IS
692 xgene_enet_ecc_init(pdata);
693 xgene_enet_config_ring_if_assoc(pdata);
c3f4465d
IS
694
695 return 0;
e6ad7673
IS
696}
697
/* Reset a single queue in the ring interface: free-pool queues via the
 * FP reset register (indexed by fpsel), work queues via the WQ reset
 * register (indexed by buffer number).
 */
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}
713
/* Shut down the port: reset every free-pool queue (buffer pools and
 * optional page pools) and every TX work queue in one write each, then
 * gate the port clock on device-tree systems.
 */
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;
		pb |= BIT(xgene_enet_get_fpsel(ring->id));
		ring = pdata->rx_ring[i]->page_pool;
		if (ring)
			pb |= BIT(xgene_enet_get_fpsel(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		pb |= BIT(xgene_enet_ring_bufnum(ring->id));
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}
744
/* Resolve pause/flow-control settings after autonegotiation.
 *
 * Combines the locally requested TX/RX pause configuration with the
 * link partner's advertised pause capabilities (per the standard MII
 * flow-control resolution) and applies any resulting change through
 * the mac_ops flowctl hooks.  No-op for half duplex or when pause
 * autonegotiation is disabled.
 *
 * Return: always 0.
 */
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}
786
e6ad7673
IS
787static void xgene_enet_adjust_link(struct net_device *ndev)
788{
789 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
9a8c5dde 790 const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
971d3a44 791 struct phy_device *phydev = ndev->phydev;
e6ad7673
IS
792
793 if (phydev->link) {
794 if (pdata->phy_speed != phydev->speed) {
d0eb7458 795 pdata->phy_speed = phydev->speed;
9a8c5dde 796 mac_ops->set_speed(pdata);
47c62b6d
IS
797 mac_ops->rx_enable(pdata);
798 mac_ops->tx_enable(pdata);
e6ad7673
IS
799 phy_print_status(phydev);
800 }
56090b12
IS
801
802 xgene_enet_flowctrl_cfg(ndev);
e6ad7673 803 } else {
47c62b6d
IS
804 mac_ops->rx_disable(pdata);
805 mac_ops->tx_disable(pdata);
e6ad7673
IS
806 pdata->phy_speed = SPEED_UNKNOWN;
807 phy_print_status(phydev);
808 }
809}
810
#ifdef CONFIG_ACPI
/* Look up the ACPI device referenced by the "phy-handle" property of
 * @dev's ACPI companion.  Returns NULL (with a debug message) when no
 * matching reference exists.
 */
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return args.adev;
}
#endif
830int xgene_enet_phy_connect(struct net_device *ndev)
e6ad7673
IS
831{
832 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
8089a96f 833 struct device_node *np;
e6ad7673
IS
834 struct phy_device *phy_dev;
835 struct device *dev = &pdata->pdev->dev;
8089a96f 836 int i;
e6ad7673 837
de7b5b3d 838 if (dev->of_node) {
8089a96f
IS
839 for (i = 0 ; i < 2; i++) {
840 np = of_parse_phandle(dev->of_node, "phy-handle", i);
03377e38
IS
841 phy_dev = of_phy_connect(ndev, np,
842 &xgene_enet_adjust_link,
843 0, pdata->phy_mode);
844 of_node_put(np);
845 if (phy_dev)
846 break;
de7b5b3d 847 }
e6ad7673 848
04d53b20
RK
849 if (!phy_dev) {
850 netdev_err(ndev, "Could not connect to PHY\n");
851 return -ENODEV;
852 }
04d53b20 853 } else {
8089a96f 854#ifdef CONFIG_ACPI
36232012 855 struct acpi_device *adev = acpi_phy_find_device(dev);
8089a96f 856 if (adev)
971d3a44
PR
857 phy_dev = adev->driver_data;
858 else
859 phy_dev = NULL;
de7b5b3d 860
04d53b20
RK
861 if (!phy_dev ||
862 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
863 pdata->phy_mode)) {
864 netdev_err(ndev, "Could not connect to PHY\n");
865 return -ENODEV;
866 }
ea966cb6
AB
867#else
868 return -ENODEV;
8089a96f 869#endif
e6ad7673
IS
870 }
871
872 pdata->phy_speed = SPEED_UNKNOWN;
873 phy_dev->supported &= ~SUPPORTED_10baseT_Half &
874 ~SUPPORTED_100baseT_Half &
875 ~SUPPORTED_1000baseT_Half;
56090b12
IS
876 phy_dev->supported |= SUPPORTED_Pause |
877 SUPPORTED_Asym_Pause;
e6ad7673 878 phy_dev->advertising = phy_dev->supported;
e6ad7673
IS
879
880 return 0;
881}
882
/* Register the MDIO bus.
 *
 * DT path: find the "apm,xgene-mdio" child node and register through
 * of_mdiobus_register().  Non-DT path: register the bus with all PHYs
 * masked from auto-probing, read the PHY address from the
 * "phy-channel" (or fallback "phy-addr") device property, and register
 * that single PHY explicitly.
 *
 * Return: 0 on success; -ENXIO when the DT lacks an mdio node, -EINVAL
 * when no PHY address property exists, -EIO when PHY registration
 * fails, or the underlying registration error.
 */
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}
931
/* Allocate, configure and register the RGMII MDIO bus, then connect the
 * PHY.  The MAC register window pointer is stashed in mdio_bus->priv
 * for the read/write accessors.
 *
 * Return: 0 on success; on failure the bus is unregistered/freed and
 * the error is returned.
 */
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}
965
/* Detach the PHY from the netdev, if one is attached. */
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}
973
/* Tear down MDIO: disconnect the PHY (if any), then unregister and free
 * the bus and clear the cached pointer.
 */
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}
d0eb7458 985
3cdb7309 986const struct xgene_mac_ops xgene_gmac_ops = {
d0eb7458
IS
987 .init = xgene_gmac_init,
988 .reset = xgene_gmac_reset,
989 .rx_enable = xgene_gmac_rx_enable,
990 .tx_enable = xgene_gmac_tx_enable,
991 .rx_disable = xgene_gmac_rx_disable,
992 .tx_disable = xgene_gmac_tx_disable,
9a8c5dde 993 .set_speed = xgene_gmac_set_speed,
d0eb7458 994 .set_mac_addr = xgene_gmac_set_mac_addr,
350b4e33 995 .set_framesize = xgene_enet_set_frame_size,
bb64fa09
IS
996 .enable_tx_pause = xgene_gmac_enable_tx_pause,
997 .flowctl_tx = xgene_gmac_flowctl_tx,
998 .flowctl_rx = xgene_gmac_flowctl_rx,
d0eb7458
IS
999};
1000
3cdb7309 1001const struct xgene_port_ops xgene_gport_ops = {
d0eb7458 1002 .reset = xgene_enet_reset,
cb11c062 1003 .clear = xgene_enet_clear,
d0eb7458
IS
1004 .cle_bypass = xgene_enet_cle_bypass,
1005 .shutdown = xgene_gport_shutdown,
1006};
/* First-generation ring manager operations table. */
struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};
1015};