drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
1// SPDX-License-Identifier: GPL-2.0
2#define pr_fmt(fmt) "bcmasp_intf: " fmt
3
4#include <asm/byteorder.h>
5#include <linux/brcmphy.h>
6#include <linux/clk.h>
7#include <linux/delay.h>
8#include <linux/etherdevice.h>
9#include <linux/netdevice.h>
10#include <linux/of_net.h>
11#include <linux/of_mdio.h>
12#include <linux/phy.h>
13#include <linux/phy_fixed.h>
14#include <linux/ptp_classify.h>
15#include <linux/platform_device.h>
16#include <net/ip.h>
17#include <net/ipv6.h>
18
19#include "bcmasp.h"
20#include "bcmasp_intf_defs.h"
21
22static int incr_ring(int index, int ring_count)
23{
24 index++;
25 if (index == ring_count)
26 return 0;
27
28 return index;
29}
30
31/* Points to last byte of descriptor */
32static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
33 int ring_count)
34{
35 dma_addr_t end = beg + (ring_count * DESC_SIZE);
36
37 addr += DESC_SIZE;
38 if (addr > end)
39 return beg + DESC_SIZE - 1;
40
41 return addr;
42}
43
44/* Points to first byte of descriptor */
45static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
46 int ring_count)
47{
48 dma_addr_t end = beg + (ring_count * DESC_SIZE);
49
50 addr += DESC_SIZE;
51 if (addr >= end)
52 return beg;
53
54 return addr;
55}
56
57static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
58{
59 if (en) {
60 tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
61 tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
62 TX_EPKT_C_CFG_MISC_PT |
63 (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
64 TX_EPKT_C_CFG_MISC);
65 } else {
66 tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
67 tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
68 }
69}
70
71static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
72{
73 if (en)
74 rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
75 RX_EDPKT_CFG_ENABLE);
76 else
77 rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
78}
79
80static void bcmasp_set_rx_mode(struct net_device *dev)
81{
82 unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
83 struct bcmasp_intf *intf = netdev_priv(dev);
84 struct netdev_hw_addr *ha;
85 int ret;
86
87 spin_lock_bh(&intf->parent->mda_lock);
88
89 bcmasp_disable_all_filters(intf);
90
91 if (dev->flags & IFF_PROMISC)
92 goto set_promisc;
93
94 bcmasp_set_promisc(intf, 0);
95
96 bcmasp_set_broad(intf, 1);
97
98 bcmasp_set_oaddr(intf, dev->dev_addr, 1);
99
100 if (dev->flags & IFF_ALLMULTI) {
101 bcmasp_set_allmulti(intf, 1);
102 } else {
103 bcmasp_set_allmulti(intf, 0);
104
105 netdev_for_each_mc_addr(ha, dev) {
106 ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
107 if (ret) {
108 intf->mib.mc_filters_full_cnt++;
109 goto set_promisc;
110 }
111 }
112 }
113
114 netdev_for_each_uc_addr(ha, dev) {
115 ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
116 if (ret) {
117 intf->mib.uc_filters_full_cnt++;
118 goto set_promisc;
119 }
120 }
121
122 spin_unlock_bh(&intf->parent->mda_lock);
123 return;
124
125set_promisc:
126 bcmasp_set_promisc(intf, 1);
127 intf->mib.promisc_filters_cnt++;
128
129 /* disable all filters used by this port */
130 bcmasp_disable_all_filters(intf);
131
132 spin_unlock_bh(&intf->parent->mda_lock);
133}
134
135static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
136{
137 struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];
138
139 txcb->skb = NULL;
140 dma_unmap_addr_set(txcb, dma_addr, 0);
141 dma_unmap_len_set(txcb, dma_len, 0);
142 txcb->last = false;
143}
144
145static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
146{
147 int next_index, i;
148
149 /* Check if we have enough room for cnt descriptors */
150 for (i = 0; i < cnt; i++) {
151 next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
152 if (next_index == intf->tx_spb_clean_index)
153 return 1;
154 }
155
156 return 0;
157}
158
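/* Prepend the hardware checksum offload descriptor block to the skb.
 * Falls back to software checksumming (skb_checksum_help) for
 * unsupported protocols or when headroom cannot be expanded.
 */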
159static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
160 struct sk_buff *skb,
161 bool *csum_hw)
162{
163 struct bcmasp_intf *intf = netdev_priv(dev);
164 u32 header = 0, header2 = 0, epkt = 0;
165 struct bcmasp_pkt_offload *offload;
166 unsigned int header_cnt = 0;
167 u8 ip_proto;
168 int ret;
169
170 if (skb->ip_summed != CHECKSUM_PARTIAL)
171 return skb;
172
173 ret = skb_cow_head(skb, sizeof(*offload));
174 if (ret < 0) {
175 intf->mib.tx_realloc_offload_failed++;
176 goto help;
177 }
178
179 switch (skb->protocol) {
180 case htons(ETH_P_IP):
181 header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
182 header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
183 epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
184 ip_proto = ip_hdr(skb)->protocol;
185 header_cnt += 2;
186 break;
187 case htons(ETH_P_IPV6):
188 header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
189 header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
190 epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
191 ip_proto = ipv6_hdr(skb)->nexthdr;
192 header_cnt += 2;
193 break;
194 default:
195 goto help;
196 }
197
198 switch (ip_proto) {
199 case IPPROTO_TCP:
200 header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
201 epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
202 header_cnt++;
203 break;
204 case IPPROTO_UDP:
205 header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
206 epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
207 header_cnt++;
208 break;
209 default:
210 goto help;
211 }
212
213 offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));
214
215 header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
216 PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
217 epkt |= PKT_OFFLOAD_EPKT_OP;
218
219 offload->nop = htonl(PKT_OFFLOAD_NOP);
220 offload->header = htonl(header);
221 offload->header2 = htonl(header2);
222 offload->epkt = htonl(epkt);
223 offload->end = htonl(PKT_OFFLOAD_END_OP);
224 *csum_hw = true;
225
226 return skb;
227
228help:
229 skb_checksum_help(skb);
230
231 return skb;
232}
233
234static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
235{
236 return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
237}
238
239static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
240{
241 rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
242}
243
244static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
245{
246 rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
247}
248
249static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
250{
251 return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
252}
253
254static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
255{
256 tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
257}
258
259static const struct bcmasp_intf_ops bcmasp_intf_ops = {
260 .rx_desc_read = bcmasp_rx_edpkt_dma_rq,
261 .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
262 .rx_desc_write = bcmasp_rx_edpkt_dma_wq,
263 .tx_read = bcmasp_tx_spb_dma_rq,
264 .tx_write = bcmasp_tx_spb_dma_wq,
265};
266
267static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
268{
269 struct bcmasp_intf *intf = netdev_priv(dev);
270 unsigned int total_bytes, size;
271 int spb_index, nr_frags, i, j;
272 struct bcmasp_tx_cb *txcb;
273 dma_addr_t mapping, valid;
274 struct bcmasp_desc *desc;
275 bool csum_hw = false;
276 struct device *kdev;
277 skb_frag_t *frag;
278
279 kdev = &intf->parent->pdev->dev;
280
281 nr_frags = skb_shinfo(skb)->nr_frags;
282
283 if (tx_spb_ring_full(intf, nr_frags + 1)) {
284 netif_stop_queue(dev);
285 if (net_ratelimit())
286 netdev_err(dev, "Tx Ring Full!\n");
287 return NETDEV_TX_BUSY;
288 }
289
290 /* Save skb len before adding csum offload header */
291 total_bytes = skb->len;
292 skb = bcmasp_csum_offload(dev, skb, &csum_hw);
293 if (!skb)
294 return NETDEV_TX_OK;
295
296 spb_index = intf->tx_spb_index;
297 valid = intf->tx_spb_dma_valid;
298 for (i = 0; i <= nr_frags; i++) {
299 if (!i) {
300 size = skb_headlen(skb);
301 if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
302 if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
303 return NETDEV_TX_OK;
304 size = skb->len;
305 }
306 mapping = dma_map_single(kdev, skb->data, size,
307 DMA_TO_DEVICE);
308 } else {
309 frag = &skb_shinfo(skb)->frags[i - 1];
310 size = skb_frag_size(frag);
311 mapping = skb_frag_dma_map(kdev, frag, 0, size,
312 DMA_TO_DEVICE);
313 }
314
315 if (dma_mapping_error(kdev, mapping)) {
316 intf->mib.tx_dma_failed++;
317 spb_index = intf->tx_spb_index;
318 for (j = 0; j < i; j++) {
319 bcmasp_clean_txcb(intf, spb_index);
320 spb_index = incr_ring(spb_index,
321 DESC_RING_COUNT);
322 }
323 /* Rewind so we do not have a hole */
324 spb_index = intf->tx_spb_index;
325 return NETDEV_TX_OK;
326 }
327
328 txcb = &intf->tx_cbs[spb_index];
329 desc = &intf->tx_spb_cpu[spb_index];
330 memset(desc, 0, sizeof(*desc));
331 txcb->skb = skb;
332 txcb->bytes_sent = total_bytes;
333 dma_unmap_addr_set(txcb, dma_addr, mapping);
334 dma_unmap_len_set(txcb, dma_len, size);
335 if (!i) {
336 desc->flags |= DESC_SOF;
337 if (csum_hw)
338 desc->flags |= DESC_EPKT_CMD;
339 }
340
341 if (i == nr_frags) {
342 desc->flags |= DESC_EOF;
343 txcb->last = true;
344 }
345
346 desc->buf = mapping;
347 desc->size = size;
348 desc->flags |= DESC_INT_EN;
349
350 netif_dbg(intf, tx_queued, dev,
351 "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
352 __func__, &mapping, desc->size, desc->flags,
353 spb_index);
354
355 spb_index = incr_ring(spb_index, DESC_RING_COUNT);
356 valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
357 DESC_RING_COUNT);
358 }
359
360 /* Ensure all descriptors have been written to DRAM for the
361 * hardware to see up-to-date contents.
362 */
363 wmb();
364
365 intf->tx_spb_index = spb_index;
366 intf->tx_spb_dma_valid = valid;
367 bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
368
369 if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
370 netif_stop_queue(dev);
371
372 return NETDEV_TX_OK;
373}
374
375static void bcmasp_netif_start(struct net_device *dev)
376{
377 struct bcmasp_intf *intf = netdev_priv(dev);
378
379 bcmasp_set_rx_mode(dev);
380 napi_enable(&intf->tx_napi);
381 napi_enable(&intf->rx_napi);
382
383 bcmasp_enable_rx_irq(intf, 1);
384 bcmasp_enable_tx_irq(intf, 1);
385
386 phy_start(dev->phydev);
387}
388
389static void umac_reset(struct bcmasp_intf *intf)
390{
391 umac_wl(intf, 0x0, UMC_CMD);
392 umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
393 usleep_range(10, 100);
394 umac_wl(intf, 0x0, UMC_CMD);
395}
396
397static void umac_set_hw_addr(struct bcmasp_intf *intf,
398 const unsigned char *addr)
399{
400 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
401 addr[3];
402 u32 mac1 = (addr[4] << 8) | addr[5];
403
404 umac_wl(intf, mac0, UMC_MAC0);
405 umac_wl(intf, mac1, UMC_MAC1);
406}
407
408static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
409 unsigned int enable)
410{
411 u32 reg;
412
413 reg = umac_rl(intf, UMC_CMD);
414 if (enable)
415 reg |= mask;
416 else
417 reg &= ~mask;
418 umac_wl(intf, reg, UMC_CMD);
419
420 /* UniMAC stops on a packet boundary, wait for a full-sized packet
421 * to be processed (1 msec).
422 */
423 if (enable == 0)
424 usleep_range(1000, 2000);
425}
426
427static void umac_init(struct bcmasp_intf *intf)
428{
429 umac_wl(intf, 0x800, UMC_FRM_LEN);
430 umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
431 umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
432 umac_enable_set(intf, UMC_CMD_PROMISC, 1);
433}
434
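/* NAPI TX completion handler: unmap and reclaim the descriptors the
 * hardware has finished with, then re-enable the TX interrupt.
 */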
435static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
436{
437 struct bcmasp_intf *intf =
438 container_of(napi, struct bcmasp_intf, tx_napi);
439 struct bcmasp_intf_stats64 *stats = &intf->stats64;
440 struct device *kdev = &intf->parent->pdev->dev;
441 unsigned long read, released = 0;
442 struct bcmasp_tx_cb *txcb;
443 struct bcmasp_desc *desc;
444 dma_addr_t mapping;
445
446 read = bcmasp_intf_tx_read(intf);
447 while (intf->tx_spb_dma_read != read) {
448 txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
449 mapping = dma_unmap_addr(txcb, dma_addr);
450
451 dma_unmap_single(kdev, mapping,
452 dma_unmap_len(txcb, dma_len),
453 DMA_TO_DEVICE);
454
455 if (txcb->last) {
456 dev_consume_skb_any(txcb->skb);
457
458 u64_stats_update_begin(&stats->syncp);
459 u64_stats_inc(&stats->tx_packets);
460 u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
461 u64_stats_update_end(&stats->syncp);
462 }
463
464 desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];
465
466 netif_dbg(intf, tx_done, intf->ndev,
467 "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
468 __func__, &mapping, desc->size, desc->flags,
469 intf->tx_spb_clean_index);
470
471 bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
472 released++;
473
474 intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
475 DESC_RING_COUNT);
476 intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
477 intf->tx_spb_dma_addr,
478 DESC_RING_COUNT);
479 }
480
481 /* Ensure all descriptors have been written to DRAM for the hardware
482 * to see updated contents.
483 */
484 wmb();
485
486 napi_complete(&intf->tx_napi);
487
488 bcmasp_enable_tx_irq(intf, 1);
489
490 if (released)
491 netif_wake_queue(intf->ndev);
492
493 return 0;
494}
495
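/* NAPI RX handler: copy completed frames out of the DMA ring buffer
 * into freshly allocated skbs and pass them up the stack.
 */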
496static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
497{
498 struct bcmasp_intf *intf =
499 container_of(napi, struct bcmasp_intf, rx_napi);
500 struct bcmasp_intf_stats64 *stats = &intf->stats64;
501 struct device *kdev = &intf->parent->pdev->dev;
502 unsigned long processed = 0;
503 struct bcmasp_desc *desc;
504 struct sk_buff *skb;
505 dma_addr_t valid;
506 void *data;
507 u64 flags;
508 u32 len;
509
510 valid = bcmasp_intf_rx_desc_read(intf) + 1;
511 if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
512 valid = intf->rx_edpkt_dma_addr;
513
514 while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
515 desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];
516
517 /* Ensure that descriptor has been fully written to DRAM by
518 * hardware before reading by the CPU
519 */
520 rmb();
521
522 /* Calculate virt addr by offsetting from physical addr */
523 data = intf->rx_ring_cpu +
524 (DESC_ADDR(desc->buf) - intf->rx_ring_dma);
525
526 flags = DESC_FLAGS(desc->buf);
527 if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
528 if (net_ratelimit()) {
529 netif_err(intf, rx_status, intf->ndev,
530 "flags=0x%llx\n", flags);
531 }
532
533 u64_stats_update_begin(&stats->syncp);
534 if (flags & DESC_CRC_ERR)
535 u64_stats_inc(&stats->rx_crc_errs);
536 if (flags & DESC_RX_SYM_ERR)
537 u64_stats_inc(&stats->rx_sym_errs);
538 u64_stats_update_end(&stats->syncp);
539
540 goto next;
541 }
542
543 dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
544 DMA_FROM_DEVICE);
545
546 len = desc->size;
547
548 skb = napi_alloc_skb(napi, len);
549 if (!skb) {
550 u64_stats_update_begin(&stats->syncp);
551 u64_stats_inc(&stats->rx_dropped);
552 u64_stats_update_end(&stats->syncp);
553 intf->mib.alloc_rx_skb_failed++;
554
555 goto next;
556 }
557
558 skb_put(skb, len);
559 memcpy(skb->data, data, len);
560
561 skb_pull(skb, 2);
562 len -= 2;
563 if (likely(intf->crc_fwd)) {
564 skb_trim(skb, len - ETH_FCS_LEN);
565 len -= ETH_FCS_LEN;
566 }
567
568 if ((intf->ndev->features & NETIF_F_RXCSUM) &&
569 (desc->buf & DESC_CHKSUM))
570 skb->ip_summed = CHECKSUM_UNNECESSARY;
571
572 skb->protocol = eth_type_trans(skb, intf->ndev);
573
574 napi_gro_receive(napi, skb);
575
576 u64_stats_update_begin(&stats->syncp);
577 u64_stats_inc(&stats->rx_packets);
578 u64_stats_add(&stats->rx_bytes, len);
579 u64_stats_update_end(&stats->syncp);
580
581next:
582 bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
583 desc->size));
584
585 processed++;
586 intf->rx_edpkt_dma_read =
587 incr_first_byte(intf->rx_edpkt_dma_read,
588 intf->rx_edpkt_dma_addr,
589 DESC_RING_COUNT);
590 intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
591 DESC_RING_COUNT);
592 }
593
594 bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
595
596 if (processed < budget) {
597 napi_complete_done(&intf->rx_napi, processed);
598 bcmasp_enable_rx_irq(intf, 1);
599 }
600
601 return processed;
602}
603
604static void bcmasp_adj_link(struct net_device *dev)
605{
606 struct bcmasp_intf *intf = netdev_priv(dev);
607 struct phy_device *phydev = dev->phydev;
608 u32 cmd_bits = 0, reg;
609 int changed = 0;
610
611 if (intf->old_link != phydev->link) {
612 changed = 1;
613 intf->old_link = phydev->link;
614 }
615
616 if (intf->old_duplex != phydev->duplex) {
617 changed = 1;
618 intf->old_duplex = phydev->duplex;
619 }
620
621 switch (phydev->speed) {
622 case SPEED_2500:
623 cmd_bits = UMC_CMD_SPEED_2500;
624 break;
625 case SPEED_1000:
626 cmd_bits = UMC_CMD_SPEED_1000;
627 break;
628 case SPEED_100:
629 cmd_bits = UMC_CMD_SPEED_100;
630 break;
631 case SPEED_10:
632 cmd_bits = UMC_CMD_SPEED_10;
633 break;
634 default:
635 break;
636 }
637 cmd_bits <<= UMC_CMD_SPEED_SHIFT;
638
639 if (phydev->duplex == DUPLEX_HALF)
640 cmd_bits |= UMC_CMD_HD_EN;
641
642 if (intf->old_pause != phydev->pause) {
643 changed = 1;
644 intf->old_pause = phydev->pause;
645 }
646
647 if (!phydev->pause)
648 cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;
649
650 if (!changed)
651 return;
652
653 if (phydev->link) {
654 reg = umac_rl(intf, UMC_CMD);
655 reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
656 UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
657 UMC_CMD_TX_PAUSE_IGNORE);
658 reg |= cmd_bits;
659 umac_wl(intf, reg, UMC_CMD);
660
661 intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
662 bcmasp_eee_enable_set(intf, intf->eee.eee_active);
663 }
664
665 reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
666 if (phydev->link)
667 reg |= RGMII_LINK;
668 else
669 reg &= ~RGMII_LINK;
670 rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
671
672 if (changed)
673 phy_print_status(phydev);
674}
675
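/* Allocate the RX buffer ring and EDPKT descriptor ring, then program
 * the hardware base/end/valid pointers for both.
 */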
676static int bcmasp_init_rx(struct bcmasp_intf *intf)
677{
678 struct device *kdev = &intf->parent->pdev->dev;
679 struct page *buffer_pg;
680 dma_addr_t dma;
681 void *p;
682 u32 reg;
683 int ret;
684
685 intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
686 buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
687 if (!buffer_pg)
688 return -ENOMEM;
689
690 dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
691 DMA_FROM_DEVICE);
692 if (dma_mapping_error(kdev, dma)) {
693 __free_pages(buffer_pg, intf->rx_buf_order);
694 return -ENOMEM;
695 }
696 intf->rx_ring_cpu = page_to_virt(buffer_pg);
697 intf->rx_ring_dma = dma;
698 intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
699
700 p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
701 GFP_KERNEL);
702 if (!p) {
703 ret = -ENOMEM;
704 goto free_rx_ring;
705 }
706 intf->rx_edpkt_cpu = p;
707
708 netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
709
710 intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
711 intf->rx_edpkt_index = 0;
712
713 /* Make sure channels are disabled */
714 rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
715
716 /* Rx SPB */
717 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
718 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
719 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
720 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
721 RX_EDPKT_RING_BUFFER_END);
722 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
723 RX_EDPKT_RING_BUFFER_VALID);
724
725 /* EDPKT */
726 rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
727 RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
728 (RX_EDPKT_CFG_CFG0_64_ALN <<
729 RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
730 (RX_EDPKT_CFG_CFG0_EFRM_STUF),
731 RX_EDPKT_CFG_CFG0);
732 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
733 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
734 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
735 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
736 RX_EDPKT_DMA_END);
737 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
738 RX_EDPKT_DMA_VALID);
739
740 reg = UMAC2FB_CFG_DEFAULT_EN |
741 ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
742 reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
743 umac2fb_wl(intf, reg, UMAC2FB_CFG);
744
745 return 0;
746
747free_rx_ring:
748 dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
749 DMA_FROM_DEVICE);
750 __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
751
752 return ret;
753}
754
755static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
756{
757 struct device *kdev = &intf->parent->pdev->dev;
758
759 dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
760 intf->rx_edpkt_dma_addr);
761 dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
762 DMA_FROM_DEVICE);
763 __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
764}
765
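/* Allocate the TX descriptor ring and control blocks, then program the
 * hardware base/end/valid pointers.
 */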
766static int bcmasp_init_tx(struct bcmasp_intf *intf)
767{
768 struct device *kdev = &intf->parent->pdev->dev;
769 void *p;
770 int ret;
771
772 p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
773 GFP_KERNEL);
774 if (!p)
775 return -ENOMEM;
776
777 intf->tx_spb_cpu = p;
778 intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
779 intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
780
781 intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
782 GFP_KERNEL);
783 if (!intf->tx_cbs) {
784 ret = -ENOMEM;
785 goto free_tx_spb;
786 }
787
788 intf->tx_spb_index = 0;
789 intf->tx_spb_clean_index = 0;
790
791 netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
792
793 /* Make sure channels are disabled */
794 tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
795 tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
796
797 /* Tx SPB */
798 tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
799 TX_SPB_CTRL_XF_CTRL2);
800 tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
801 tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
802 tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);
803
804 tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
805 tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
806 tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
807 tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
808
809 return 0;
810
811free_tx_spb:
812 dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
813 intf->tx_spb_dma_addr);
814
815 return ret;
816}
817
818static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
819{
820 struct device *kdev = &intf->parent->pdev->dev;
821
822 /* Free descriptors */
823 dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
824 intf->tx_spb_dma_addr);
825
826 /* Free cbs */
827 kfree(intf->tx_cbs);
828}
829
830static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
831{
832 u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
833 RGMII_EPHY_CFG_IDDQ_GLOBAL;
834 u32 reg;
835
836 reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
837 if (enable) {
838 reg &= ~RGMII_EPHY_CK25_DIS;
839 rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
840 mdelay(1);
841
842 reg &= ~mask;
843 reg |= RGMII_EPHY_RESET;
844 rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
845 mdelay(1);
846
847 reg &= ~RGMII_EPHY_RESET;
848 } else {
849 reg |= mask | RGMII_EPHY_RESET;
850 rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
851 mdelay(1);
852 reg |= RGMII_EPHY_CK25_DIS;
853 }
854 rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
855 mdelay(1);
856
857 /* Set or clear the LED control override to avoid lighting up LEDs
858 * while the EPHY is powered off and drawing unnecessary current.
859 */
860 reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
861 if (enable)
862 reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
863 else
864 reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
865 rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
866}
867
868static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
869{
870 u32 reg;
871
872 reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
873 reg &= ~RGMII_OOB_DIS;
874 if (enable)
875 reg |= RGMII_MODE_EN;
876 else
877 reg &= ~RGMII_MODE_EN;
878 rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
879}
880
881static void bcmasp_netif_deinit(struct net_device *dev)
882{
883 struct bcmasp_intf *intf = netdev_priv(dev);
884 u32 reg, timeout = 1000;
885
886 napi_disable(&intf->tx_napi);
887
888 bcmasp_enable_tx(intf, 0);
889
890 /* Flush any TX packets in the pipe */
891 tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
892 do {
893 reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
894 if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
895 break;
896 usleep_range(1000, 2000);
897 } while (timeout-- > 0);
898 tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
899
900 umac_enable_set(intf, UMC_CMD_TX_EN, 0);
901
902 phy_stop(dev->phydev);
903
904 umac_enable_set(intf, UMC_CMD_RX_EN, 0);
905
906 bcmasp_flush_rx_port(intf);
907 usleep_range(1000, 2000);
908 bcmasp_enable_rx(intf, 0);
909
910 napi_disable(&intf->rx_napi);
911
912 /* Disable interrupts */
913 bcmasp_enable_tx_irq(intf, 0);
914 bcmasp_enable_rx_irq(intf, 0);
915
916 netif_napi_del(&intf->tx_napi);
917 bcmasp_reclaim_free_all_tx(intf);
918
919 netif_napi_del(&intf->rx_napi);
920 bcmasp_reclaim_free_all_rx(intf);
921}
922
923static int bcmasp_stop(struct net_device *dev)
924{
925 struct bcmasp_intf *intf = netdev_priv(dev);
926
927 netif_dbg(intf, ifdown, dev, "bcmasp stop\n");
928
929 /* Stop tx from updating HW */
930 netif_tx_disable(dev);
931
932 bcmasp_netif_deinit(dev);
933
934 phy_disconnect(dev->phydev);
935
936 /* Disable internal EPHY or external PHY */
937 if (intf->internal_phy)
938 bcmasp_ephy_enable_set(intf, false);
939 else
940 bcmasp_rgmii_mode_en_set(intf, false);
941
942 /* Disable the interface clocks */
943 bcmasp_core_clock_set_intf(intf, false);
944
945 clk_disable_unprepare(intf->parent->clk);
946
947 return 0;
948}
949
950static void bcmasp_configure_port(struct bcmasp_intf *intf)
951{
952 u32 reg, id_mode_dis = 0;
953
954 reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
955 reg &= ~RGMII_PORT_MODE_MASK;
956
957 switch (intf->phy_interface) {
958 case PHY_INTERFACE_MODE_RGMII:
959 /* RGMII_NO_ID: TXC transitions at the same time as TXD
960 * (requires PCB or receiver-side delay)
961 * RGMII: Add 2ns delay on TXC (90 degree shift)
962 *
963 * ID is implicitly disabled for 100Mbps (RG)MII operation.
964 */
965 id_mode_dis = RGMII_ID_MODE_DIS;
966 fallthrough;
967 case PHY_INTERFACE_MODE_RGMII_TXID:
968 reg |= RGMII_PORT_MODE_EXT_GPHY;
969 break;
970 case PHY_INTERFACE_MODE_MII:
971 reg |= RGMII_PORT_MODE_EXT_EPHY;
972 break;
973 default:
974 break;
975 }
976
977 if (intf->internal_phy)
978 reg |= RGMII_PORT_MODE_EPHY;
979
980 rgmii_wl(intf, reg, RGMII_PORT_CNTRL);
981
982 reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
983 reg &= ~RGMII_ID_MODE_DIS;
984 reg |= id_mode_dis;
985 rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
986}
987
988static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
989{
990 struct bcmasp_intf *intf = netdev_priv(dev);
991 phy_interface_t phy_iface = intf->phy_interface;
992 u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
993 PHY_BRCM_DIS_TXCRXC_NOENRGY |
994 PHY_BRCM_IDDQ_SUSPEND;
995 struct phy_device *phydev = NULL;
996 int ret;
997
998 /* Always enable interface clocks */
999 bcmasp_core_clock_set_intf(intf, true);
1000
1001 /* Enable internal PHY or external PHY before any MAC activity */
1002 if (intf->internal_phy)
1003 bcmasp_ephy_enable_set(intf, true);
1004 else
1005 bcmasp_rgmii_mode_en_set(intf, true);
1006 bcmasp_configure_port(intf);
1007
1008 /* This is an ugly quirk but we have not been correctly
1009 * interpreting the phy_interface values and we have done that
1010 * across different drivers, so at least we are consistent in
1011 * our mistakes.
1012 *
1013 * When the Generic PHY driver is in use either the PHY has
1014 * been strapped or programmed correctly by the boot loader so
1015 * we should stick to our incorrect interpretation since we
1016 * have validated it.
1017 *
1018 * Now when a dedicated PHY driver is in use, we need to
1019 * reverse the meaning of the phy_interface_mode values to
1020 * something that the PHY driver will interpret and act on such
1021 * that we have two mistakes canceling themselves so to speak.
1022 * We only do this for the two modes that GENET driver
1023 * officially supports on Broadcom STB chips:
1024 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
1025 * Other modes are not *officially* supported with the boot
1026 * loader and the scripted environment generating Device Tree
1027 * blobs for those platforms.
1028 *
1029 * Note that internal PHY and fixed-link configurations are not
1030 * affected because they use different phy_interface_t values
1031 * or the Generic PHY driver.
1032 */
1033 switch (phy_iface) {
1034 case PHY_INTERFACE_MODE_RGMII:
1035 phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
1036 break;
1037 case PHY_INTERFACE_MODE_RGMII_TXID:
1038 phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
1039 break;
1040 default:
1041 break;
1042 }
1043
1044 if (phy_connect) {
1045 phydev = of_phy_connect(dev, intf->phy_dn,
1046 bcmasp_adj_link, phy_flags,
1047 phy_iface);
1048 if (!phydev) {
1049 ret = -ENODEV;
1050 netdev_err(dev, "could not attach to PHY\n");
1051 goto err_phy_disable;
1052 }
1053
1054 /* Indicate that the MAC is responsible for PHY PM */
1055 phydev->mac_managed_pm = true;
1056 } else if (!intf->wolopts) {
1057 ret = phy_resume(dev->phydev);
1058 if (ret)
1059 goto err_phy_disable;
1060 }
1061
1062 umac_reset(intf);
1063
1064 umac_init(intf);
1065
1066 /* Disable the UniMAC RX/TX */
1067 umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
1068
1069 umac_set_hw_addr(intf, dev->dev_addr);
1070
1071 intf->old_duplex = -1;
1072 intf->old_link = -1;
1073 intf->old_pause = -1;
1074
1075 ret = bcmasp_init_tx(intf);
1076 if (ret)
1077 goto err_phy_disconnect;
1078
1079 /* Turn on asp */
1080 bcmasp_enable_tx(intf, 1);
1081
1082 ret = bcmasp_init_rx(intf);
1083 if (ret)
1084 goto err_reclaim_tx;
1085
1086 bcmasp_enable_rx(intf, 1);
1087
1088 /* Turn on UniMAC TX/RX */
1089 umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
1090
1091 intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
1092
1093 bcmasp_netif_start(dev);
1094
1095 netif_start_queue(dev);
1096
1097 return 0;
1098
1099err_reclaim_tx:
1100 netif_napi_del(&intf->tx_napi);
1101 bcmasp_reclaim_free_all_tx(intf);
1102err_phy_disconnect:
1103 if (phydev)
1104 phy_disconnect(phydev);
1105err_phy_disable:
1106 if (intf->internal_phy)
1107 bcmasp_ephy_enable_set(intf, false);
1108 else
1109 bcmasp_rgmii_mode_en_set(intf, false);
1110 return ret;
1111}
1112
1113static int bcmasp_open(struct net_device *dev)
1114{
1115 struct bcmasp_intf *intf = netdev_priv(dev);
1116 int ret;
1117
1118 netif_dbg(intf, ifup, dev, "bcmasp open\n");
1119
1120 ret = clk_prepare_enable(intf->parent->clk);
1121 if (ret)
1122 return ret;
1123
1124 ret = bcmasp_netif_init(dev, true);
1125 if (ret)
1126 clk_disable_unprepare(intf->parent->clk);
1127
1128 return ret;
1129}
1130
1131static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1132{
1133 struct bcmasp_intf *intf = netdev_priv(dev);
1134
1135 netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
1136 intf->mib.tx_timeout_cnt++;
1137}
1138
1139static int bcmasp_get_phys_port_name(struct net_device *dev,
1140 char *name, size_t len)
1141{
1142 struct bcmasp_intf *intf = netdev_priv(dev);
1143
1144 if (snprintf(name, len, "p%d", intf->port) >= len)
1145 return -EINVAL;
1146
1147 return 0;
1148}
1149
1150static void bcmasp_get_stats64(struct net_device *dev,
1151 struct rtnl_link_stats64 *stats)
1152{
1153 struct bcmasp_intf *intf = netdev_priv(dev);
1154 struct bcmasp_intf_stats64 *lstats;
1155 unsigned int start;
1156
1157 lstats = &intf->stats64;
1158
1159 do {
1160 start = u64_stats_fetch_begin(&lstats->syncp);
1161 stats->rx_packets = u64_stats_read(&lstats->rx_packets);
1162 stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
1163 stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
1164 stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
1165 stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
1166 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1167
1168 stats->tx_packets = u64_stats_read(&lstats->tx_packets);
1169 stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
1170 } while (u64_stats_fetch_retry(&lstats->syncp, start));
1171}
1172
1173static const struct net_device_ops bcmasp_netdev_ops = {
1174 .ndo_open = bcmasp_open,
1175 .ndo_stop = bcmasp_stop,
1176 .ndo_start_xmit = bcmasp_xmit,
1177 .ndo_tx_timeout = bcmasp_tx_timeout,
1178 .ndo_set_rx_mode = bcmasp_set_rx_mode,
1179 .ndo_get_phys_port_name = bcmasp_get_phys_port_name,
1180 .ndo_eth_ioctl = phy_do_ioctl_running,
1181 .ndo_set_mac_address = eth_mac_addr,
1182 .ndo_get_stats64 = bcmasp_get_stats64,
1183};
1184
1185static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
1186{
1187 /* Per port */
1188 intf->res.umac = priv->base + UMC_OFFSET(intf);
1189 intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
1190 (intf->port * 0x4));
1191 intf->res.rgmii = priv->base + RGMII_OFFSET(intf);
1192
1193 /* Per ch */
1194 intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
1195 intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
1196 intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
1197 intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
1198 intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);
1199
1200 intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
1201 intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
1202}
1203
1204#define MAX_IRQ_STR_LEN 64
1205struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
1206 struct device_node *ndev_dn, int i)
1207{
1208 struct device *dev = &priv->pdev->dev;
1209 struct bcmasp_intf *intf;
1210 struct net_device *ndev;
1211 int ch, port, ret;
1212
1213 if (of_property_read_u32(ndev_dn, "reg", &port)) {
1214 dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
1215 goto err;
1216 }
1217
1218 if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
1219 dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
1220 goto err;
1221 }
1222
1223 ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
1224 if (!ndev) {
1225 dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
1226 goto err;
1227 }
1228 intf = netdev_priv(ndev);
1229
1230 intf->parent = priv;
1231 intf->ndev = ndev;
1232 intf->channel = ch;
1233 intf->port = port;
1234 intf->ndev_dn = ndev_dn;
1235 intf->index = i;
1236
1237 ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
1238 if (ret < 0) {
1239 dev_err(dev, "invalid PHY mode property\n");
1240 goto err_free_netdev;
1241 }
1242
1243 if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
1244 intf->internal_phy = true;
1245
1246 intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
1247 if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
1248 ret = of_phy_register_fixed_link(ndev_dn);
1249 if (ret) {
1250 dev_warn(dev, "%s: failed to register fixed PHY\n",
1251 ndev_dn->name);
1252 goto err_free_netdev;
1253 }
1254 intf->phy_dn = ndev_dn;
1255 }
1256
1257 /* Map resource */
1258 bcmasp_map_res(priv, intf);
1259
1260 if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
1261 intf->phy_interface != PHY_INTERFACE_MODE_MII &&
1262 intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
1263 (intf->port != 1 && intf->internal_phy)) {
1264 netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
1265 phy_modes(intf->phy_interface), intf->port);
1266 ret = -EINVAL;
1267 goto err_free_netdev;
1268 }
1269
1270 ret = of_get_ethdev_address(ndev_dn, ndev);
1271 if (ret) {
1272 netdev_warn(ndev, "using random Ethernet MAC\n");
1273 eth_hw_addr_random(ndev);
1274 }
1275
1276 SET_NETDEV_DEV(ndev, dev);
1277 intf->ops = &bcmasp_intf_ops;
1278 ndev->netdev_ops = &bcmasp_netdev_ops;
1279 ndev->ethtool_ops = &bcmasp_ethtool_ops;
1280 intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
1281 NETIF_MSG_PROBE |
1282 NETIF_MSG_LINK);
1283 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
1284 NETIF_F_RXCSUM;
1285 ndev->hw_features |= ndev->features;
1286 ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
1287
1288 return intf;
1289
1290err_free_netdev:
1291 free_netdev(ndev);
1292err:
1293 return NULL;
1294}
1295
1296void bcmasp_interface_destroy(struct bcmasp_intf *intf)
1297{
1298 if (intf->ndev->reg_state == NETREG_REGISTERED)
1299 unregister_netdev(intf->ndev);
1300 if (of_phy_is_fixed_link(intf->ndev_dn))
1301 of_phy_deregister_fixed_link(intf->ndev_dn);
1302 free_netdev(intf->ndev);
1303}
1304
1305static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
1306{
1307 struct net_device *ndev = intf->ndev;
1308 u32 reg;
1309
1310 reg = umac_rl(intf, UMC_MPD_CTRL);
1311 if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
1312 reg |= UMC_MPD_CTRL_MPD_EN;
1313 reg &= ~UMC_MPD_CTRL_PSW_EN;
1314 if (intf->wolopts & WAKE_MAGICSECURE) {
1315 /* Program the SecureOn password */
1316 umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
1317 UMC_PSW_MS);
1318 umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
1319 UMC_PSW_LS);
1320 reg |= UMC_MPD_CTRL_PSW_EN;
1321 }
1322 umac_wl(intf, reg, UMC_MPD_CTRL);
1323
1324 if (intf->wolopts & WAKE_FILTER)
1325 bcmasp_netfilt_suspend(intf);
1326
1327 /* UniMAC receive needs to be turned on */
1328 umac_enable_set(intf, UMC_CMD_RX_EN, 1);
1329
1330 if (intf->parent->wol_irq > 0) {
1331 wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1332 ASP_WAKEUP_INTR2_MASK_CLEAR);
1333 }
1334
1335 netif_dbg(intf, wol, ndev, "entered WOL mode\n");
1336}
1337
1338int bcmasp_interface_suspend(struct bcmasp_intf *intf)
1339{
1340 struct device *kdev = &intf->parent->pdev->dev;
1341 struct net_device *dev = intf->ndev;
1342 int ret = 0;
1343
1344 if (!netif_running(dev))
1345 return 0;
1346
1347 netif_device_detach(dev);
1348
1349 bcmasp_netif_deinit(dev);
1350
1351 if (!intf->wolopts) {
1352 ret = phy_suspend(dev->phydev);
1353 if (ret)
1354 goto out;
1355
1356 if (intf->internal_phy)
1357 bcmasp_ephy_enable_set(intf, false);
1358 else
1359 bcmasp_rgmii_mode_en_set(intf, false);
1360
1361 /* If Wake-on-LAN is disabled, we can safely
1362 * disable the network interface clocks.
1363 */
1364 bcmasp_core_clock_set_intf(intf, false);
1365 }
1366
1367 if (device_may_wakeup(kdev) && intf->wolopts)
1368 bcmasp_suspend_to_wol(intf);
1369
1370 clk_disable_unprepare(intf->parent->clk);
1371
1372 return ret;
1373
1374out:
1375 bcmasp_netif_init(dev, false);
1376 return ret;
1377}
1378
1379static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
1380{
1381 u32 reg;
1382
1383 reg = umac_rl(intf, UMC_MPD_CTRL);
1384 reg &= ~UMC_MPD_CTRL_MPD_EN;
1385 umac_wl(intf, reg, UMC_MPD_CTRL);
1386
1387 if (intf->parent->wol_irq > 0) {
1388 wakeup_intr2_core_wl(intf->parent, 0xffffffff,
1389 ASP_WAKEUP_INTR2_MASK_SET);
1390 }
1391}
1392
1393int bcmasp_interface_resume(struct bcmasp_intf *intf)
1394{
1395 struct net_device *dev = intf->ndev;
1396 int ret;
1397
1398 if (!netif_running(dev))
1399 return 0;
1400
1401 ret = clk_prepare_enable(intf->parent->clk);
1402 if (ret)
1403 return ret;
1404
1405 ret = bcmasp_netif_init(dev, false);
1406 if (ret)
1407 goto out;
1408
1409 bcmasp_resume_from_wol(intf);
1410
1411 if (intf->eee.eee_enabled)
1412 bcmasp_eee_enable_set(intf, true);
1413
1414 netif_device_attach(dev);
1415
1416 return 0;
1417
1418out:
1419 clk_disable_unprepare(intf->parent->clk);
1420 return ret;
1421}