1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) 2014-2015 Hisilicon Limited.
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/interrupt.h>
13 #include <linux/ipv6.h>
14 #include <linux/irq.h>
15 #include <linux/module.h>
16 #include <linux/phy.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
22 #include "hns_dsaf_mac.h"
24 #define NIC_MAX_Q_PER_VF 16
25 #define HNS_NIC_TX_TIMEOUT (5 * HZ)
27 #define SERVICE_TIMER_HZ (1 * HZ)
29 #define RCB_IRQ_NOT_INITED 0
30 #define RCB_IRQ_INITED 1
31 #define HNS_BUFFER_SIZE_2048 2048
33 #define BD_MAX_SEND_SIZE 8191
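/* largest payload a single TX buffer descriptor can carry; bigger buffers are split across several BDs (see fill_tso_desc() and hns_nic_maybe_stop_tso()) */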
35 static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
36 int send_sz, dma_addr_t dma, int frag_end,
37 int buf_num, enum hns_desc_type type, int mtu)
39 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
40 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
42 struct ipv6hdr *ipv6hdr;
54 desc_cb->length = size;
58 desc->addr = cpu_to_le64(dma);
59 desc->tx.send_size = cpu_to_le16((u16)send_sz);
61 /* config bd buffer end */
62 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
63 hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
65 /* fill port_id in the tx bd for sending management pkts */
66 hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
67 HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
69 if (type == DESC_TYPE_SKB) {
70 skb = (struct sk_buff *)priv;
72 if (skb->ip_summed == CHECKSUM_PARTIAL) {
73 skb_reset_mac_len(skb);
74 protocol = skb->protocol;
77 if (protocol == htons(ETH_P_8021Q)) {
78 ip_offset += VLAN_HLEN;
79 protocol = vlan_get_protocol(skb);
80 skb->protocol = protocol;
83 if (skb->protocol == htons(ETH_P_IP)) {
85 hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
86 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
88 /* check for tcp/udp header */
89 if (iphdr->protocol == IPPROTO_TCP &&
93 l4_len = tcp_hdrlen(skb);
94 mss = skb_shinfo(skb)->gso_size;
95 paylen = skb->len - skb_tcp_all_headers(skb);
97 } else if (skb->protocol == htons(ETH_P_IPV6)) {
98 hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
99 ipv6hdr = ipv6_hdr(skb);
100 hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
102 /* check for tcp/udp header */
103 if (ipv6hdr->nexthdr == IPPROTO_TCP &&
104 skb_is_gso(skb) && skb_is_gso_v6(skb)) {
107 l4_len = tcp_hdrlen(skb);
108 mss = skb_shinfo(skb)->gso_size;
109 paylen = skb->len - skb_tcp_all_headers(skb);
112 desc->tx.ip_offset = ip_offset;
113 desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
114 desc->tx.mss = cpu_to_le16(mss);
115 desc->tx.l4_len = l4_len;
116 desc->tx.paylen = cpu_to_le16(paylen);
120 hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
122 desc->tx.bn_pid = bn_pid;
123 desc->tx.ra_ri_cs_fe_vld = rrcfv;
125 ring_ptr_move_fw(ring, next_to_use);
128 static void fill_v2_desc(struct hnae_ring *ring, void *priv,
129 int size, dma_addr_t dma, int frag_end,
130 int buf_num, enum hns_desc_type type, int mtu)
132 fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
136 static const struct acpi_device_id hns_enet_acpi_match[] = {
141 MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
143 static void fill_desc(struct hnae_ring *ring, void *priv,
144 int size, dma_addr_t dma, int frag_end,
145 int buf_num, enum hns_desc_type type, int mtu)
147 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
148 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
152 u32 asid_bufnum_pid = 0;
153 u32 flag_ipoffset = 0;
155 desc_cb->priv = priv;
156 desc_cb->length = size;
158 desc_cb->type = type;
160 desc->addr = cpu_to_le64(dma);
161 desc->tx.send_size = cpu_to_le16((u16)size);
163 /* config bd buffer end */
164 flag_ipoffset |= 1 << HNS_TXD_VLD_B;
166 asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
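/* note: the v1 descriptor stores buf_num as-is, unlike the v2 descriptor above which stores buf_num - 1 */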
168 if (type == DESC_TYPE_SKB) {
169 skb = (struct sk_buff *)priv;
171 if (skb->ip_summed == CHECKSUM_PARTIAL) {
172 protocol = skb->protocol;
173 ip_offset = ETH_HLEN;
175 /* if it is a SW VLAN, check the next protocol */
176 if (protocol == htons(ETH_P_8021Q)) {
177 ip_offset += VLAN_HLEN;
178 protocol = vlan_get_protocol(skb);
179 skb->protocol = protocol;
182 if (skb->protocol == htons(ETH_P_IP)) {
183 flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
184 /* check for tcp/udp header */
185 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
187 } else if (skb->protocol == htons(ETH_P_IPV6)) {
188 /* ipv6 has no L3 checksum; check the L4 header */
189 flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
192 flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
196 flag_ipoffset |= frag_end << HNS_TXD_FE_B;
198 desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
199 desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
201 ring_ptr_move_fw(ring, next_to_use);
204 static void unfill_desc(struct hnae_ring *ring)
206 ring_ptr_move_bw(ring, next_to_use);
209 static int hns_nic_maybe_stop_tx(
210 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
212 struct sk_buff *skb = *out_skb;
213 struct sk_buff *new_skb = NULL;
216 /* no. of segments (plus a header) */
217 buf_num = skb_shinfo(skb)->nr_frags + 1;
219 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
220 if (ring_space(ring) < 1)
223 new_skb = skb_copy(skb, GFP_ATOMIC);
227 dev_kfree_skb_any(skb);
230 } else if (buf_num > ring_space(ring)) {
238 static int hns_nic_maybe_stop_tso(
239 struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
245 struct sk_buff *skb = *out_skb;
246 struct sk_buff *new_skb = NULL;
249 size = skb_headlen(skb);
250 buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
252 frag_num = skb_shinfo(skb)->nr_frags;
253 for (i = 0; i < frag_num; i++) {
254 frag = &skb_shinfo(skb)->frags[i];
255 size = skb_frag_size(frag);
256 buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
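/* buf_num now counts one BD per BD_MAX_SEND_SIZE chunk of the linear data and of every frag */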
259 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
260 buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
261 if (ring_space(ring) < buf_num)
263 /* manually split the packet to be sent */
264 new_skb = skb_copy(skb, GFP_ATOMIC);
267 dev_kfree_skb_any(skb);
270 } else if (ring_space(ring) < buf_num) {
278 static void fill_tso_desc(struct hnae_ring *ring, void *priv,
279 int size, dma_addr_t dma, int frag_end,
280 int buf_num, enum hns_desc_type type, int mtu)
286 frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
287 sizeoflast = size % BD_MAX_SEND_SIZE;
288 sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
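/* every BD but the last carries BD_MAX_SEND_SIZE bytes; the last carries the remainder (or a full BD_MAX_SEND_SIZE if the size divides evenly) */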
290 /* when the frag size is bigger than the hardware limit, split this frag */
291 for (k = 0; k < frag_buf_num; k++)
292 fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
293 (k == frag_buf_num - 1) ?
294 sizeoflast : BD_MAX_SEND_SIZE,
295 dma + BD_MAX_SEND_SIZE * k,
296 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
298 (type == DESC_TYPE_SKB && !k) ?
299 DESC_TYPE_SKB : DESC_TYPE_PAGE,
303 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
305 struct hns_nic_ring_data *ring_data)
307 struct hns_nic_priv *priv = netdev_priv(ndev);
308 struct hnae_ring *ring = ring_data->ring;
309 struct device *dev = ring_to_dev(ring);
310 struct netdev_queue *dev_queue;
315 int size, next_to_use;
318 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
320 ring->stats.tx_busy++;
321 goto out_net_tx_busy;
323 ring->stats.sw_err_cnt++;
324 netdev_err(ndev, "no memory to xmit!\n");
330 /* no. of segments (plus a header) */
331 seg_num = skb_shinfo(skb)->nr_frags + 1;
332 next_to_use = ring->next_to_use;
334 /* fill the first part */
335 size = skb_headlen(skb);
336 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
337 if (dma_mapping_error(dev, dma)) {
338 netdev_err(ndev, "TX head DMA map failed\n");
339 ring->stats.sw_err_cnt++;
342 priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
343 buf_num, DESC_TYPE_SKB, ndev->mtu);
345 /* fill the fragments */
346 for (i = 1; i < seg_num; i++) {
347 frag = &skb_shinfo(skb)->frags[i - 1];
348 size = skb_frag_size(frag);
349 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
350 if (dma_mapping_error(dev, dma)) {
351 netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
352 ring->stats.sw_err_cnt++;
353 goto out_map_frag_fail;
355 priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
356 seg_num - 1 == i ? 1 : 0, buf_num,
357 DESC_TYPE_PAGE, ndev->mtu);
360 /* done translating all packets into descriptors */
361 dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
362 netdev_tx_sent_queue(dev_queue, skb->len);
364 netif_trans_update(ndev);
365 ndev->stats.tx_bytes += skb->len;
366 ndev->stats.tx_packets++;
368 wmb(); /* commit all data before submit */
369 assert(skb->queue_mapping < priv->ae_handle->q_num);
370 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
376 while (ring->next_to_use != next_to_use) {
378 if (ring->next_to_use != next_to_use)
380 ring->desc_cb[ring->next_to_use].dma,
381 ring->desc_cb[ring->next_to_use].length,
384 dma_unmap_single(dev,
385 ring->desc_cb[next_to_use].dma,
386 ring->desc_cb[next_to_use].length,
392 dev_kfree_skb_any(skb);
397 netif_stop_subqueue(ndev, skb->queue_mapping);
399 /* Herbert's original patch had:
400 * smp_mb__after_netif_stop_queue();
401 * but since that doesn't exist yet, just open code it.
404 return NETDEV_TX_BUSY;
407 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
408 struct hnae_ring *ring, int pull_len,
409 struct hnae_desc_cb *desc_cb)
411 struct hnae_desc *desc;
417 twobufs = ((PAGE_SIZE < 8192) &&
418 hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
420 desc = &ring->desc[ring->next_to_clean];
421 size = le16_to_cpu(desc->rx.size);
424 truesize = hnae_buf_size(ring);
426 truesize = ALIGN(size, L1_CACHE_BYTES);
427 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
430 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
431 size - pull_len, truesize);
433 /* avoid reusing remote pages; the reuse flag defaults to not reusing */
434 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
438 /* if we are the only owner of the page we can reuse it */
439 if (likely(page_count(desc_cb->priv) == 1)) {
440 /* flip page offset to other buffer */
441 desc_cb->page_offset ^= truesize;
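/* truesize is half the page in the two-buffer case, so the XOR toggles the offset between the two buffers sharing the page */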
443 desc_cb->reuse_flag = 1;
444 /* bump ref count on page before it is given*/
445 get_page(desc_cb->priv);
450 /* move offset up to the next cache line */
451 desc_cb->page_offset += truesize;
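/* outside the two-buffer case the offset only ever advances; the check below stops reuse once the next buffer would run past the end of the page */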
453 if (desc_cb->page_offset <= last_offset) {
454 desc_cb->reuse_flag = 1;
455 /* bump ref count on page before it is given*/
456 get_page(desc_cb->priv);
460 static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
462 *out_bnum = hnae_get_field(bnum_flag,
463 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
466 static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
468 *out_bnum = hnae_get_field(bnum_flag,
469 HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
472 static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
473 struct sk_buff *skb, u32 flag)
475 struct net_device *netdev = ring_data->napi.dev;
479 /* check if RX checksum offload is enabled */
480 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
483 /* In hardware, we only support checksum for the following protocols:
 * 1) IPv4,
485 * 2) TCP(over IPv4 or IPv6),
486 * 3) UDP(over IPv4 or IPv6),
487 * 4) SCTP(over IPv4 or IPv6)
488 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
489 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
491 * Hardware limitation:
492 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
493 * Error" bit (which usually can be used to indicate whether checksum
494 * was calculated by the hardware and if there was any error encountered
495 * during checksum calculation).
497 * Software workaround:
498 * We do get info within the RX descriptor about the kind of L3/L4
499 * protocol coming in the packet and the error status. These errors
500 * might not just be checksum errors but could be related to version,
501 * length of IPv4, UDP, TCP etc.
502 * Because there is no way of knowing if it is an L3/L4 error due to bad
503 * checksum or any other L3/L4 error, we will not (cannot) convey
504 * checksum status for such cases to upper stack and will not maintain
505 * the RX L3/L4 checksum counters as well.
508 l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
509 l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
511 /* check L3 protocol for which checksum is supported */
512 if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
515 /* check for any (not just checksum) flagged L3 protocol errors */
516 if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
519 /* we do not support checksum of fragmented packets */
520 if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
523 /* check L4 protocol for which checksum is supported */
524 if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
525 (l4id != HNS_RX_FLAG_L4ID_UDP) &&
526 (l4id != HNS_RX_FLAG_L4ID_SCTP))
529 /* check for any (not just checksum) flagged L4 protocol errors */
530 if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
533 /* now, this has to be a packet with valid RX checksum */
534 skb->ip_summed = CHECKSUM_UNNECESSARY;
537 static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
538 struct sk_buff **out_skb, int *out_bnum)
540 struct hnae_ring *ring = ring_data->ring;
541 struct net_device *ndev = ring_data->napi.dev;
542 struct hns_nic_priv *priv = netdev_priv(ndev);
544 struct hnae_desc *desc;
545 struct hnae_desc_cb *desc_cb;
551 desc = &ring->desc[ring->next_to_clean];
552 desc_cb = &ring->desc_cb[ring->next_to_clean];
556 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
558 /* prefetch first cache line of first page */
561 skb = *out_skb = napi_alloc_skb(&ring_data->napi,
563 if (unlikely(!skb)) {
564 ring->stats.sw_err_cnt++;
568 prefetchw(skb->data);
569 length = le16_to_cpu(desc->rx.pkt_len);
570 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
571 priv->ops.get_rxd_bnum(bnum_flag, &bnum);
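/* copybreak: short packets are copied straight into the freshly allocated skb, longer ones attach the page as an skb frag and may span several BDs */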
574 if (length <= HNS_RX_HEAD_SIZE) {
575 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
577 /* we can reuse buffer as-is, just make sure it is local */
578 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
579 desc_cb->reuse_flag = 1;
580 else /* this page cannot be reused so discard it */
581 put_page(desc_cb->priv);
583 ring_ptr_move_fw(ring, next_to_clean);
585 if (unlikely(bnum != 1)) { /* check err*/
590 ring->stats.seg_pkt_cnt++;
592 pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
593 memcpy(__skb_put(skb, pull_len), va,
594 ALIGN(pull_len, sizeof(long)));
596 hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
597 ring_ptr_move_fw(ring, next_to_clean);
599 if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
603 for (i = 1; i < bnum; i++) {
604 desc = &ring->desc[ring->next_to_clean];
605 desc_cb = &ring->desc_cb[ring->next_to_clean];
607 hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
608 ring_ptr_move_fw(ring, next_to_clean);
612 /* exception handling: free the skb and skip over the descs */
613 if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
615 *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, so this cannot be 0 */
616 netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
617 bnum, ring->max_desc_num_per_pkt,
618 length, (int)MAX_SKB_FRAGS,
619 ((u64 *)desc)[0], ((u64 *)desc)[1]);
620 ring->stats.err_bd_num++;
621 dev_kfree_skb_any(skb);
625 bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
627 if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
628 netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
629 ((u64 *)desc)[0], ((u64 *)desc)[1]);
630 ring->stats.non_vld_descs++;
631 dev_kfree_skb_any(skb);
635 if (unlikely((!desc->rx.pkt_len) ||
636 hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
637 ring->stats.err_pkt_len++;
638 dev_kfree_skb_any(skb);
642 if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
643 ring->stats.l2_err++;
644 dev_kfree_skb_any(skb);
648 ring->stats.rx_pkts++;
649 ring->stats.rx_bytes += skb->len;
651 /* indicate to upper stack if our hardware has already calculated
654 hns_nic_rx_checksum(ring_data, skb, bnum_flag);
660 hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
663 struct hnae_desc_cb res_cbs;
664 struct hnae_desc_cb *desc_cb;
665 struct hnae_ring *ring = ring_data->ring;
666 struct net_device *ndev = ring_data->napi.dev;
668 for (i = 0; i < cleand_count; i++) {
669 desc_cb = &ring->desc_cb[ring->next_to_use];
670 if (desc_cb->reuse_flag) {
671 ring->stats.reuse_pg_cnt++;
672 hnae_reuse_buffer(ring, ring->next_to_use);
674 ret = hnae_reserve_buffer_map(ring, &res_cbs);
676 ring->stats.sw_err_cnt++;
677 netdev_err(ndev, "hnae reserve buffer map failed.\n");
680 hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
683 ring_ptr_move_fw(ring, next_to_use);
686 wmb(); /* make sure all data has been written before submit */
687 writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
690 /* return error number for error or number of desc left to take
692 static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
695 struct net_device *ndev = ring_data->napi.dev;
697 skb->protocol = eth_type_trans(skb, ndev);
698 napi_gro_receive(&ring_data->napi, skb);
701 static int hns_desc_unused(struct hnae_ring *ring)
703 int ntc = ring->next_to_clean;
704 int ntu = ring->next_to_use;
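/* unused descs are those from ntu forward to ntc (modulo the ring size), e.g. desc_num = 64, ntu = 10, ntc = 4 gives 58 free slots */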
706 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
709 #define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
710 #define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */
712 #define HNS_COAL_BDNUM 3
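/* when adaptive coalescing is enabled and the ring is busy enough, hns_coal_rx_bdnum() returns HNS_COAL_BDNUM, allowing polling to complete with that many BDs still pending */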
714 static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
716 bool coal_enable = ring->q->handle->coal_adapt_en;
719 ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
720 return HNS_COAL_BDNUM;
725 static void hns_update_rx_rate(struct hnae_ring *ring)
727 bool coal_enable = ring->q->handle->coal_adapt_en;
732 time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
735 /* ring->stats.rx_bytes overflowed */
736 if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
737 ring->coal_last_rx_bytes = ring->stats.rx_bytes;
738 ring->coal_last_jiffies = jiffies;
742 total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
743 time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
744 do_div(total_bytes, time_passed_ms);
745 ring->coal_rx_rate = total_bytes >> 10;
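/* total_bytes is now bytes per millisecond; shifting right by 10 approximates the rate in MB/s */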
747 ring->coal_last_rx_bytes = ring->stats.rx_bytes;
748 ring->coal_last_jiffies = jiffies;
752 * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
753 * @new_param: new value
754 * @old_param: old value
756 static u32 smooth_alg(u32 new_param, u32 old_param)
758 u32 gap = (new_param > old_param) ? new_param - old_param
759 : old_param - new_param;
764 if (new_param > old_param)
765 return old_param + gap;
767 return old_param - gap;
771 * hns_nic_adpt_coalesce - self-adaptive coalescing according to the rx rate
772 * @ring_data: pointer to hns_nic_ring_data
774 static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
776 struct hnae_ring *ring = ring_data->ring;
777 struct hnae_handle *handle = ring->q->handle;
778 u32 new_coal_param, old_coal_param = ring->coal_param;
780 if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
781 new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
782 else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
783 new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
785 new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;
787 if (new_coal_param == old_coal_param &&
788 new_coal_param == handle->coal_param)
791 new_coal_param = smooth_alg(new_coal_param, old_coal_param);
792 ring->coal_param = new_coal_param;
795 * Because all rings in one port share one coalesce param, when one ring
796 * calculates its own coalesce param it cannot always write it to hardware
797 * at once. It is written in one of three cases:
798 * 1. the current ring's coalesce param is larger than the hardware's.
799 * 2. the ring which adapted last time may change it again.
 * 3. enough time has passed since the hardware was last updated.
802 if (new_coal_param == handle->coal_param) {
803 handle->coal_last_jiffies = jiffies;
804 handle->coal_ring_idx = ring_data->queue_index;
805 } else if (new_coal_param > handle->coal_param ||
806 handle->coal_ring_idx == ring_data->queue_index ||
807 time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
808 handle->dev->ops->set_coalesce_usecs(handle,
810 handle->dev->ops->set_coalesce_frames(handle,
812 handle->coal_param = new_coal_param;
813 handle->coal_ring_idx = ring_data->queue_index;
814 handle->coal_last_jiffies = jiffies;
818 static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
821 struct hnae_ring *ring = ring_data->ring;
824 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
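/* refill rx buffers in batches of 16 so the RCB head register is not written for every single descriptor */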
825 int recv_pkts, recv_bds, clean_count, err;
826 int unused_count = hns_desc_unused(ring);
828 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
829 rmb(); /* make sure num taken effect before the other data is touched */
831 recv_pkts = 0, recv_bds = 0, clean_count = 0;
834 while (recv_pkts < budget && recv_bds < num) {
835 /* reuse or realloc buffers */
836 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
837 hns_nic_alloc_rx_buffers(ring_data,
838 clean_count + unused_count);
840 unused_count = hns_desc_unused(ring);
844 err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
845 if (unlikely(!skb)) /* this fault cannot be repaired */
850 if (unlikely(err)) { /* skip the errored packet */
855 /* hand the packet up to the IP stack */
856 ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
862 /* make sure all data has been written before submit */
863 if (clean_count + unused_count > 0)
864 hns_nic_alloc_rx_buffers(ring_data,
865 clean_count + unused_count);
870 static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
872 struct hnae_ring *ring = ring_data->ring;
876 hns_update_rx_rate(ring);
878 /* workaround for a hardware bug */
879 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
880 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
882 if (num <= hns_coal_rx_bdnum(ring)) {
883 if (ring->q->handle->coal_adapt_en)
884 hns_nic_adpt_coalesce(ring_data);
888 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
897 static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
899 struct hnae_ring *ring = ring_data->ring;
902 hns_update_rx_rate(ring);
903 num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
905 if (num <= hns_coal_rx_bdnum(ring)) {
906 if (ring->q->handle->coal_adapt_en)
907 hns_nic_adpt_coalesce(ring_data);
915 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
916 int *bytes, int *pkts)
918 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
920 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
921 (*bytes) += desc_cb->length;
922 /* desc_cb will be cleaned after hnae_free_buffer_detach */
923 hnae_free_buffer_detach(ring, ring->next_to_clean);
925 ring_ptr_move_fw(ring, next_to_clean);
928 static int is_valid_clean_head(struct hnae_ring *ring, int h)
930 int u = ring->next_to_use;
931 int c = ring->next_to_clean;
933 if (unlikely(h > ring->desc_num))
936 assert(u > 0 && u < ring->desc_num);
937 assert(c > 0 && c < ring->desc_num);
938 assert(u != c && h != c); /* must be checked before call this func */
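/* the head reported by hardware is valid iff it lies in the ring-order interval (next_to_clean, next_to_use] */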
940 return u > c ? (h > c && h <= u) : (h > c || h <= u);
943 /* reclaim all desc in one budget
944 * return error or number of desc left
946 static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
949 struct hnae_ring *ring = ring_data->ring;
950 struct net_device *ndev = ring_data->napi.dev;
951 struct netdev_queue *dev_queue;
952 struct hns_nic_priv *priv = netdev_priv(ndev);
956 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
957 rmb(); /* make sure head is ready before touch any data */
959 if (is_ring_empty(ring) || head == ring->next_to_clean)
960 return 0; /* no data to poll */
962 if (!is_valid_clean_head(ring, head)) {
963 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
964 ring->next_to_use, ring->next_to_clean);
965 ring->stats.io_err_cnt++;
971 while (head != ring->next_to_clean) {
972 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
973 /* issue prefetch for next Tx descriptor */
974 prefetch(&ring->desc_cb[ring->next_to_clean]);
976 /* update tx ring statistics. */
977 ring->stats.tx_pkts += pkts;
978 ring->stats.tx_bytes += bytes;
980 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
981 netdev_tx_completed_queue(dev_queue, pkts, bytes);
983 if (unlikely(priv->link && !netif_carrier_ok(ndev)))
984 netif_carrier_on(ndev);
986 if (unlikely(pkts && netif_carrier_ok(ndev) &&
987 (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
988 /* Make sure that anybody stopping the queue after this
989 * sees the new next_to_clean.
992 if (netif_tx_queue_stopped(dev_queue) &&
993 !test_bit(NIC_STATE_DOWN, &priv->state)) {
994 netif_tx_wake_queue(dev_queue);
995 ring->stats.restart_queue++;
1001 static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
1003 struct hnae_ring *ring = ring_data->ring;
1006 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
1008 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1010 if (head != ring->next_to_clean) {
1011 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
1012 ring_data->ring, 1);
1020 static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
1022 struct hnae_ring *ring = ring_data->ring;
1023 int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1025 if (head == ring->next_to_clean)
1031 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
1033 struct hnae_ring *ring = ring_data->ring;
1034 struct net_device *ndev = ring_data->napi.dev;
1035 struct netdev_queue *dev_queue;
1039 head = ring->next_to_use; /* ntu: ring position set by software */
1042 while (head != ring->next_to_clean)
1043 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
1045 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1046 netdev_tx_reset_queue(dev_queue);
1049 static int hns_nic_common_poll(struct napi_struct *napi, int budget)
1051 int clean_complete = 0;
1052 struct hns_nic_ring_data *ring_data =
1053 container_of(napi, struct hns_nic_ring_data, napi);
1054 struct hnae_ring *ring = ring_data->ring;
1056 clean_complete += ring_data->poll_one(
1057 ring_data, budget - clean_complete,
1058 ring_data->ex_process);
1060 if (clean_complete < budget) {
1061 if (ring_data->fini_process(ring_data)) {
1062 napi_complete(napi);
1063 ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
1069 return clean_complete;
1072 static irqreturn_t hns_irq_handle(int irq, void *dev)
1074 struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
1076 ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
1077 ring_data->ring, 1);
1078 napi_schedule(&ring_data->napi);
1084 *hns_nic_adjust_link - adjust the network mode according to the phy state or new parameters
1087 static void hns_nic_adjust_link(struct net_device *ndev)
1089 struct hns_nic_priv *priv = netdev_priv(ndev);
1090 struct hnae_handle *h = priv->ae_handle;
1093 /* If there is no phy, there is no need to adjust the link */
1095 /* When the phy link is down, do nothing */
1096 if (ndev->phydev->link == 0)
1099 if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1100 ndev->phydev->duplex)) {
1101 /* because the Hi161X chip does not support changing gmac
1102 * speed and duplex while traffic is flowing, delay 200ms to
1103 * make sure there is no more data in the chip FIFO.
1105 netif_carrier_off(ndev);
1107 h->dev->ops->adjust_link(h, ndev->phydev->speed,
1108 ndev->phydev->duplex);
1109 netif_carrier_on(ndev);
1113 state = state && h->dev->ops->get_status(h);
1115 if (state != priv->link) {
1117 netif_carrier_on(ndev);
1118 netif_tx_wake_all_queues(ndev);
1119 netdev_info(ndev, "link up\n");
1121 netif_carrier_off(ndev);
1122 netdev_info(ndev, "link down\n");
1129 *hns_nic_init_phy - init phy
1132 * Return 0 on success, negative on failure
1134 int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1136 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
1137 struct phy_device *phy_dev = h->phy_dev;
1143 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1144 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1145 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1147 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1148 phy_dev->autoneg = false;
1150 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1151 phy_dev->dev_flags = 0;
1153 ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
1156 ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
1161 phy_attached_info(phy_dev);
1166 static int hns_nic_ring_open(struct net_device *netdev, int idx)
1168 struct hns_nic_priv *priv = netdev_priv(netdev);
1169 struct hnae_handle *h = priv->ae_handle;
1171 napi_enable(&priv->ring_data[idx].napi);
1173 enable_irq(priv->ring_data[idx].ring->irq);
1174 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
1179 static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
1181 struct hns_nic_priv *priv = netdev_priv(ndev);
1182 struct hnae_handle *h = priv->ae_handle;
1183 struct sockaddr *mac_addr = p;
1186 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1187 return -EADDRNOTAVAIL;
1189 ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
1191 netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
1195 eth_hw_addr_set(ndev, mac_addr->sa_data);
1200 static void hns_nic_update_stats(struct net_device *netdev)
1202 struct hns_nic_priv *priv = netdev_priv(netdev);
1203 struct hnae_handle *h = priv->ae_handle;
1205 h->dev->ops->update_stats(h, &netdev->stats);
1208 /* set the mac addr if it is configured, or leave it to the AE driver */
1209 static void hns_init_mac_addr(struct net_device *ndev)
1211 struct hns_nic_priv *priv = netdev_priv(ndev);
1213 if (device_get_ethdev_address(priv->dev, ndev)) {
1214 eth_hw_addr_random(ndev);
1215 dev_warn(priv->dev, "No valid mac, use random mac %pM",
1220 static void hns_nic_ring_close(struct net_device *netdev, int idx)
1222 struct hns_nic_priv *priv = netdev_priv(netdev);
1223 struct hnae_handle *h = priv->ae_handle;
1225 h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
1226 disable_irq(priv->ring_data[idx].ring->irq);
1228 napi_disable(&priv->ring_data[idx].napi);
1231 static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
1232 struct hnae_ring *ring, cpumask_t *mask)
1236 /* Different irq balancing between 16-core and 32-core systems.
1237 * The cpu mask is set by ring index according to the ring flag,
1238 * which indicates whether the ring is tx or rx.
1240 if (q_num == num_possible_cpus()) {
1241 if (is_tx_ring(ring))
1244 cpu = ring_idx - q_num;
1246 if (is_tx_ring(ring))
1249 cpu = (ring_idx - q_num) * 2 + 1;
1252 cpumask_clear(mask);
1253 cpumask_set_cpu(cpu, mask);
1258 static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
1262 for (i = 0; i < q_num * 2; i++) {
1263 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
1264 irq_set_affinity_hint(priv->ring_data[i].ring->irq,
1266 free_irq(priv->ring_data[i].ring->irq,
1267 &priv->ring_data[i]);
1268 priv->ring_data[i].ring->irq_init_flag =
1274 static int hns_nic_init_irq(struct hns_nic_priv *priv)
1276 struct hnae_handle *h = priv->ae_handle;
1277 struct hns_nic_ring_data *rd;
1282 for (i = 0; i < h->q_num * 2; i++) {
1283 rd = &priv->ring_data[i];
1285 if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
1288 snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
1289 "%s-%s%d", priv->netdev->name,
1290 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
1292 rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
1294 irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
1295 ret = request_irq(rd->ring->irq,
1296 hns_irq_handle, 0, rd->ring->ring_name, rd);
1298 netdev_err(priv->netdev, "request irq(%d) fail\n",
1303 cpu = hns_nic_init_affinity_mask(h->q_num, i,
1304 rd->ring, &rd->mask);
1306 if (cpu_online(cpu))
1307 irq_set_affinity_hint(rd->ring->irq,
1310 rd->ring->irq_init_flag = RCB_IRQ_INITED;
1316 hns_nic_free_irq(h->q_num, priv);
1320 static int hns_nic_net_up(struct net_device *ndev)
1322 struct hns_nic_priv *priv = netdev_priv(ndev);
1323 struct hnae_handle *h = priv->ae_handle;
1327 if (!test_bit(NIC_STATE_DOWN, &priv->state))
1330 ret = hns_nic_init_irq(priv);
1332 netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
1336 for (i = 0; i < h->q_num * 2; i++) {
1337 ret = hns_nic_ring_open(ndev, i);
1339 goto out_has_some_queues;
1342 ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1344 goto out_set_mac_addr_err;
1346 ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
1351 phy_start(ndev->phydev);
1353 clear_bit(NIC_STATE_DOWN, &priv->state);
1354 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1359 netif_stop_queue(ndev);
1360 out_set_mac_addr_err:
1361 out_has_some_queues:
1362 for (j = i - 1; j >= 0; j--)
1363 hns_nic_ring_close(ndev, j);
1365 hns_nic_free_irq(h->q_num, priv);
1366 set_bit(NIC_STATE_DOWN, &priv->state);
1371 static void hns_nic_net_down(struct net_device *ndev)
1374 struct hnae_ae_ops *ops;
1375 struct hns_nic_priv *priv = netdev_priv(ndev);
1377 if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
1380 (void)del_timer_sync(&priv->service_timer);
1381 netif_tx_stop_all_queues(ndev);
1382 netif_carrier_off(ndev);
1383 netif_tx_disable(ndev);
1387 phy_stop(ndev->phydev);
1389 ops = priv->ae_handle->dev->ops;
1392 ops->stop(priv->ae_handle);
1394 netif_tx_stop_all_queues(ndev);
1396 for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
1397 hns_nic_ring_close(ndev, i);
1398 hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
1400 /* clean tx buffers*/
1401 hns_nic_tx_clr_all_bufs(priv->ring_data + i);
1405 void hns_nic_net_reset(struct net_device *ndev)
1407 struct hns_nic_priv *priv = netdev_priv(ndev);
1408 struct hnae_handle *handle = priv->ae_handle;
1410 while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
1411 usleep_range(1000, 2000);
1413 (void)hnae_reinit_handle(handle);
1415 clear_bit(NIC_STATE_RESETTING, &priv->state);
1418 void hns_nic_net_reinit(struct net_device *netdev)
1420 struct hns_nic_priv *priv = netdev_priv(netdev);
1421 enum hnae_port_type type = priv->ae_handle->port_type;
1423 netif_trans_update(priv->netdev);
1424 while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
1425 usleep_range(1000, 2000);
1427 hns_nic_net_down(netdev);
1429 /* Only do hns_nic_net_reset in debug mode
1430 * because of hardware limitation.
1432 if (type == HNAE_PORT_DEBUG)
1433 hns_nic_net_reset(netdev);
1435 (void)hns_nic_net_up(netdev);
1436 clear_bit(NIC_STATE_REINITING, &priv->state);
1439 static int hns_nic_net_open(struct net_device *ndev)
1441 struct hns_nic_priv *priv = netdev_priv(ndev);
1442 struct hnae_handle *h = priv->ae_handle;
1445 if (test_bit(NIC_STATE_TESTING, &priv->state))
1449 netif_carrier_off(ndev);
1451 ret = netif_set_real_num_tx_queues(ndev, h->q_num);
1453 netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
1458 ret = netif_set_real_num_rx_queues(ndev, h->q_num);
1461 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
1465 ret = hns_nic_net_up(ndev);
1468 "hns net up fail, ret=%d!\n", ret);
1475 static int hns_nic_net_stop(struct net_device *ndev)
1477 hns_nic_net_down(ndev);
1482 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1483 #define HNS_TX_TIMEO_LIMIT (40 * HZ)
1484 static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
1486 struct hns_nic_priv *priv = netdev_priv(ndev);
1488 if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
1489 ndev->watchdog_timeo *= 2;
1490 netdev_info(ndev, "watchdog_timo changed to %d.\n",
1491 ndev->watchdog_timeo);
1493 ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
1494 hns_tx_timeout_reset(priv);
1498 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1499 struct net_device *ndev)
1501 struct hns_nic_priv *priv = netdev_priv(ndev);
1503 assert(skb->queue_mapping < priv->ae_handle->q_num);
1505 return hns_nic_net_xmit_hw(ndev, skb,
1506 &tx_ring_data(priv, skb->queue_mapping));
1509 static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
1510 struct sk_buff *skb)
1512 dev_kfree_skb_any(skb);
1515 #define HNS_LB_TX_RING 0
1516 static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
1518 struct sk_buff *skb;
1519 struct ethhdr *ethhdr;
1522 /* allocate test skb */
1523 skb = alloc_skb(64, GFP_KERNEL);
1529 memset(skb->data, 0xFF, skb->len);
1531 /* must be a tcp/ip packet */
1532 ethhdr = (struct ethhdr *)skb->data;
1533 ethhdr->h_proto = htons(ETH_P_IP);
1535 frame_len = skb->len & (~1ul);
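/* keep 0xFF in the first half of the test frame; the second half is overwritten with 0xAA below */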
1536 memset(&skb->data[frame_len / 2], 0xAA,
1539 skb->queue_mapping = HNS_LB_TX_RING;
1544 static int hns_enable_serdes_lb(struct net_device *ndev)
1546 struct hns_nic_priv *priv = netdev_priv(ndev);
1547 struct hnae_handle *h = priv->ae_handle;
1548 struct hnae_ae_ops *ops = h->dev->ops;
1552 ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
1556 ret = ops->start ? ops->start(h) : 0;
1560 /* choose speed/duplex according to the phy interface, then adjust the link */
1561 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1567 ops->adjust_link(h, speed, duplex);
1569 /* wait h/w ready */
1575 static void hns_disable_serdes_lb(struct net_device *ndev)
1577 struct hns_nic_priv *priv = netdev_priv(ndev);
1578 struct hnae_handle *h = priv->ae_handle;
1579 struct hnae_ae_ops *ops = h->dev->ops;
1582 ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
1586 *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has already
1587 *fetched. The function works as follows:
1588 * 1. if an rx ring finds a page_offset that is not 0 between head
1589 * and tail, it means the chip fetched the wrong descs for a ring
1590 * whose buffer size is 4096.
1591 * 2. we set the chip serdes loopback and point the rss indirection to the ring.
1592 * 3. construct 64-byte ip broadcast packets and wait for the associated rx
1593 * ring to receive them all, so that it fetches new descriptors.
1594 * 4. recover to the original state.
1598 static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
1600 struct hns_nic_priv *priv = netdev_priv(ndev);
1601 struct hnae_handle *h = priv->ae_handle;
1602 struct hnae_ae_ops *ops = h->dev->ops;
1603 struct hns_nic_ring_data *rd;
1604 struct hnae_ring *ring;
1605 struct sk_buff *skb;
1616 /* alloc indir memory */
1617 indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
1618 org_indir = kzalloc(indir_size, GFP_KERNEL);
1622 /* store the original indirection */
1623 ops->get_rss(h, org_indir, NULL, NULL);
1625 cur_indir = kzalloc(indir_size, GFP_KERNEL);
1628 goto cur_indir_alloc_err;
1632 if (hns_enable_serdes_lb(ndev)) {
1634 goto enable_serdes_lb_err;
1637 /* walk every rx ring and clear its fetched descs */
1638 for (i = 0; i < h->q_num; i++) {
1639 ring = &h->qs[i]->rx_ring;
1640 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1641 tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
1643 fetch_num = ring_dist(ring, head, tail);
1645 while (head != tail) {
1646 if (ring->desc_cb[head].page_offset != 0) {
1652 if (head == ring->desc_num)
1657 for (j = 0; j < indir_size / sizeof(*org_indir); j++)
1659 ops->set_rss(h, cur_indir, NULL, 0);
1661 for (j = 0; j < fetch_num; j++) {
1662 /* alloc one skb and init */
1663 skb = hns_assemble_skb(ndev);
1668 rd = &tx_ring_data(priv, skb->queue_mapping);
1669 hns_nic_net_xmit_hw(ndev, skb, rd);
1672 while (retry_times++ < 10) {
1675 rd = &rx_ring_data(priv, i);
1676 if (rd->poll_one(rd, fetch_num,
1677 hns_nic_drop_rx_fetch))
1682 while (retry_times++ < 10) {
1684 /* clean the packets sent on tx ring 0 */
1685 rd = &tx_ring_data(priv,
1687 if (rd->poll_one(rd, fetch_num, NULL))
1695 /* restore everything */
1696 ops->set_rss(h, org_indir, NULL, 0);
1697 hns_disable_serdes_lb(ndev);
1698 enable_serdes_lb_err:
1700 cur_indir_alloc_err:
1706 static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
1708 struct hns_nic_priv *priv = netdev_priv(ndev);
1709 struct hnae_handle *h = priv->ae_handle;
1710 bool if_running = netif_running(ndev);
1713 /* MTU < 68 is an error and causes problems on some kernels */
1718 if (new_mtu == ndev->mtu)
1721 if (!h->dev->ops->set_mtu)
1725 (void)hns_nic_net_stop(ndev);
1729 if (priv->enet_ver != AE_VERSION_1 &&
1730 ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
1731 new_mtu > BD_SIZE_2048_MAX_MTU) {
1733 hnae_reinit_all_ring_desc(h);
1735 /* clear the packets which the chip has fetched */
1736 ret = hns_nic_clear_all_rx_fetch(ndev);
1738 /* the page offset must be consistent with the desc */
1739 hnae_reinit_all_ring_page_off(h);
1742 netdev_err(ndev, "clear the fetched desc fail\n");
1747 ret = h->dev->ops->set_mtu(h, new_mtu);
1749 netdev_err(ndev, "set mtu fail, return value %d\n",
1754 /* finally, set new mtu to netdevice */
1755 ndev->mtu = new_mtu;
1759 if (hns_nic_net_open(ndev)) {
1760 netdev_err(ndev, "hns net open fail\n");
1768 static int hns_nic_set_features(struct net_device *netdev,
1769 netdev_features_t features)
1771 struct hns_nic_priv *priv = netdev_priv(netdev);
1773 switch (priv->enet_ver) {
1775 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1776 netdev_info(netdev, "enet v1 do not support tso!\n");
1779 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1780 priv->ops.fill_desc = fill_tso_desc;
1781 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
1782 /* The chip only supports 7*4096 */
1783 netif_set_tso_max_size(netdev, 7 * 4096);
1785 priv->ops.fill_desc = fill_v2_desc;
1786 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1790 netdev->features = features;
1794 static netdev_features_t hns_nic_fix_features(
1795 struct net_device *netdev, netdev_features_t features)
1797 struct hns_nic_priv *priv = netdev_priv(netdev);
1799 switch (priv->enet_ver) {
1801 features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
1802 NETIF_F_HW_VLAN_CTAG_FILTER);
1810 static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
1812 struct hns_nic_priv *priv = netdev_priv(netdev);
1813 struct hnae_handle *h = priv->ae_handle;
1815 if (h->dev->ops->add_uc_addr)
1816 return h->dev->ops->add_uc_addr(h, addr);
1821 static int hns_nic_uc_unsync(struct net_device *netdev,
1822 const unsigned char *addr)
1824 struct hns_nic_priv *priv = netdev_priv(netdev);
1825 struct hnae_handle *h = priv->ae_handle;
1827 if (h->dev->ops->rm_uc_addr)
1828 return h->dev->ops->rm_uc_addr(h, addr);
1834 * hns_set_multicast_list - set multicast mac addresses
1839 static void hns_set_multicast_list(struct net_device *ndev)
1841 struct hns_nic_priv *priv = netdev_priv(ndev);
1842 struct hnae_handle *h = priv->ae_handle;
1843 struct netdev_hw_addr *ha = NULL;
1846 netdev_err(ndev, "hnae handle is null\n");
1850 if (h->dev->ops->clr_mc_addr)
1851 if (h->dev->ops->clr_mc_addr(h))
1852 netdev_err(ndev, "clear multicast address fail\n");
1854 if (h->dev->ops->set_mc_addr) {
1855 netdev_for_each_mc_addr(ha, ndev)
1856 if (h->dev->ops->set_mc_addr(h, ha->addr))
1857 netdev_err(ndev, "set multicast fail\n");
1861 static void hns_nic_set_rx_mode(struct net_device *ndev)
1863 struct hns_nic_priv *priv = netdev_priv(ndev);
1864 struct hnae_handle *h = priv->ae_handle;
1866 if (h->dev->ops->set_promisc_mode) {
1867 if (ndev->flags & IFF_PROMISC)
1868 h->dev->ops->set_promisc_mode(h, 1);
1870 h->dev->ops->set_promisc_mode(h, 0);
1873 hns_set_multicast_list(ndev);
1875 if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
1876 netdev_err(ndev, "sync uc address fail\n");
1879 static void hns_nic_get_stats64(struct net_device *ndev,
1880 struct rtnl_link_stats64 *stats)
1887 struct hns_nic_priv *priv = netdev_priv(ndev);
1888 struct hnae_handle *h = priv->ae_handle;
1890 for (idx = 0; idx < h->q_num; idx++) {
1891 tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
1892 tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
1893 rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
1894 rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
1897 stats->tx_bytes = tx_bytes;
1898 stats->tx_packets = tx_pkts;
1899 stats->rx_bytes = rx_bytes;
1900 stats->rx_packets = rx_pkts;
1902 stats->rx_errors = ndev->stats.rx_errors;
1903 stats->multicast = ndev->stats.multicast;
1904 stats->rx_length_errors = ndev->stats.rx_length_errors;
1905 stats->rx_crc_errors = ndev->stats.rx_crc_errors;
1906 stats->rx_missed_errors = ndev->stats.rx_missed_errors;
1908 stats->tx_errors = ndev->stats.tx_errors;
1909 stats->rx_dropped = ndev->stats.rx_dropped;
1910 stats->tx_dropped = ndev->stats.tx_dropped;
1911 stats->collisions = ndev->stats.collisions;
1912 stats->rx_over_errors = ndev->stats.rx_over_errors;
1913 stats->rx_frame_errors = ndev->stats.rx_frame_errors;
1914 stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
1915 stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
1916 stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
1917 stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
1918 stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
1919 stats->tx_window_errors = ndev->stats.tx_window_errors;
1920 stats->rx_compressed = ndev->stats.rx_compressed;
1921 stats->tx_compressed = ndev->stats.tx_compressed;
1925 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
1926 struct net_device *sb_dev)
1928 struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
1929 struct hns_nic_priv *priv = netdev_priv(ndev);
1931 /* work around hardware queue loopback of broadcast/multicast packets */
1932 if (!AE_IS_VER1(priv->enet_ver) &&
1933 is_multicast_ether_addr(eth_hdr->h_dest))
1936 return netdev_pick_tx(ndev, skb, NULL);
1939 static const struct net_device_ops hns_nic_netdev_ops = {
1940 .ndo_open = hns_nic_net_open,
1941 .ndo_stop = hns_nic_net_stop,
1942 .ndo_start_xmit = hns_nic_net_xmit,
1943 .ndo_tx_timeout = hns_nic_net_timeout,
1944 .ndo_set_mac_address = hns_nic_net_set_mac_address,
1945 .ndo_change_mtu = hns_nic_change_mtu,
1946 .ndo_eth_ioctl = phy_do_ioctl_running,
1947 .ndo_set_features = hns_nic_set_features,
1948 .ndo_fix_features = hns_nic_fix_features,
1949 .ndo_get_stats64 = hns_nic_get_stats64,
1950 .ndo_set_rx_mode = hns_nic_set_rx_mode,
1951 .ndo_select_queue = hns_nic_select_queue,
1954 static void hns_nic_update_link_status(struct net_device *netdev)
1956 struct hns_nic_priv *priv = netdev_priv(netdev);
1958 struct hnae_handle *h = priv->ae_handle;
1961 if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1964 (void)genphy_read_status(h->phy_dev);
1966 hns_nic_adjust_link(netdev);
1969 /* for dumping key regs*/
1970 static void hns_nic_dump(struct hns_nic_priv *priv)
1972 struct hnae_handle *h = priv->ae_handle;
1973 struct hnae_ae_ops *ops = h->dev->ops;
1974 u32 *data, reg_num, i;
1976 if (ops->get_regs_len && ops->get_regs) {
1977 reg_num = ops->get_regs_len(priv->ae_handle);
1978 reg_num = (reg_num + 3ul) & ~3ul;
1979 data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
1981 ops->get_regs(priv->ae_handle, data);
1982 for (i = 0; i < reg_num; i += 4)
1983 pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1984 i, data[i], data[i + 1],
1985 data[i + 2], data[i + 3]);
1990 for (i = 0; i < h->q_num; i++) {
1991 pr_info("tx_queue%d_next_to_clean:%d\n",
1992 i, h->qs[i]->tx_ring.next_to_clean);
1993 pr_info("tx_queue%d_next_to_use:%d\n",
1994 i, h->qs[i]->tx_ring.next_to_use);
1995 pr_info("rx_queue%d_next_to_clean:%d\n",
1996 i, h->qs[i]->rx_ring.next_to_clean);
1997 pr_info("rx_queue%d_next_to_use:%d\n",
1998 i, h->qs[i]->rx_ring.next_to_use);
2002 /* for resetting subtask */
2003 static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
2005 enum hnae_port_type type = priv->ae_handle->port_type;
2007 if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
2009 clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2011 /* If we're already down, removing or resetting, just bail */
2012 if (test_bit(NIC_STATE_DOWN, &priv->state) ||
2013 test_bit(NIC_STATE_REMOVING, &priv->state) ||
2014 test_bit(NIC_STATE_RESETTING, &priv->state))
2018 netdev_info(priv->netdev, "try to reset %s port!\n",
2019 (type == HNAE_PORT_DEBUG ? "debug" : "service"));
2022 /* put off any impending NetWatchDogTimeout */
2023 netif_trans_update(priv->netdev);
2024 hns_nic_net_reinit(priv->netdev);
2029 /* for doing service complete*/
2030 static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
2032 WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
2033 /* make sure prior state updates are visible before clearing the flag */
2034 smp_mb__before_atomic();
2035 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2038 static void hns_nic_service_task(struct work_struct *work)
2040 struct hns_nic_priv *priv
2041 = container_of(work, struct hns_nic_priv, service_task);
2042 struct hnae_handle *h = priv->ae_handle;
2044 hns_nic_reset_subtask(priv);
2045 hns_nic_update_link_status(priv->netdev);
2046 h->dev->ops->update_led_status(h);
2047 hns_nic_update_stats(priv->netdev);
2049 hns_nic_service_event_complete(priv);
2052 static void hns_nic_task_schedule(struct hns_nic_priv *priv)
2054 if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
2055 !test_bit(NIC_STATE_REMOVING, &priv->state) &&
2056 !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
2057 (void)schedule_work(&priv->service_task);
2060 static void hns_nic_service_timer(struct timer_list *t)
2062 struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
2064 (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
2066 hns_nic_task_schedule(priv);
2070 * hns_tx_timeout_reset - initiate reset due to Tx timeout
2071 * @priv: driver private struct
2073 static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
2075 /* Do the reset outside of interrupt context */
2076 if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
2077 set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2078 netdev_warn(priv->netdev,
2079 "initiating reset due to tx timeout(%llu,0x%lx)\n",
2080 priv->tx_timeout_count, priv->state);
2081 priv->tx_timeout_count++;
2082 hns_nic_task_schedule(priv);
2086 static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2088 struct hnae_handle *h = priv->ae_handle;
2089 struct hns_nic_ring_data *rd;
2090 bool is_ver1 = AE_IS_VER1(priv->enet_ver);
2093 if (h->q_num > NIC_MAX_Q_PER_VF) {
2094 netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
2098 priv->ring_data = kzalloc(array3_size(h->q_num,
2099 sizeof(*priv->ring_data), 2),
2101 if (!priv->ring_data)
2104 for (i = 0; i < h->q_num; i++) {
2105 rd = &priv->ring_data[i];
2106 rd->queue_index = i;
2107 rd->ring = &h->qs[i]->tx_ring;
2108 rd->poll_one = hns_nic_tx_poll_one;
2109 rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
2110 hns_nic_tx_fini_pro_v2;
2112 netif_napi_add(priv->netdev, &rd->napi,
2113 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2114 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2116 for (i = h->q_num; i < h->q_num * 2; i++) {
2117 rd = &priv->ring_data[i];
2118 rd->queue_index = i - h->q_num;
2119 rd->ring = &h->qs[i - h->q_num]->rx_ring;
2120 rd->poll_one = hns_nic_rx_poll_one;
2121 rd->ex_process = hns_nic_rx_up_pro;
2122 rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
2123 hns_nic_rx_fini_pro_v2;
2125 netif_napi_add(priv->netdev, &rd->napi,
2126 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2127 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2133 static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
2135 struct hnae_handle *h = priv->ae_handle;
2138 for (i = 0; i < h->q_num * 2; i++) {
2139 netif_napi_del(&priv->ring_data[i].napi);
2140 if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
2141 (void)irq_set_affinity_hint(
2142 priv->ring_data[i].ring->irq,
2144 free_irq(priv->ring_data[i].ring->irq,
2145 &priv->ring_data[i]);
2148 priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2150 kfree(priv->ring_data);
2153 static void hns_nic_set_priv_ops(struct net_device *netdev)
2155 struct hns_nic_priv *priv = netdev_priv(netdev);
2156 struct hnae_handle *h = priv->ae_handle;
2158 if (AE_IS_VER1(priv->enet_ver)) {
2159 priv->ops.fill_desc = fill_desc;
2160 priv->ops.get_rxd_bnum = get_rx_desc_bnum;
2161 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
2163 priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
2164 if ((netdev->features & NETIF_F_TSO) ||
2165 (netdev->features & NETIF_F_TSO6)) {
2166 priv->ops.fill_desc = fill_tso_desc;
2167 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
2168 /* This chip only supports 7*4096 */
2169 netif_set_tso_max_size(netdev, 7 * 4096);
2171 priv->ops.fill_desc = fill_v2_desc;
2172 priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
2174 /* enable tso at init time;
2175 * tso is turned on/off through the TSE bit in the bd
2177 h->dev->ops->set_tso_stats(h, 1);
2181 static int hns_nic_try_get_ae(struct net_device *ndev)
2183 struct hns_nic_priv *priv = netdev_priv(ndev);
2184 struct hnae_handle *h;
2187 h = hnae_get_handle(&priv->netdev->dev,
2188 priv->fwnode, priv->port_id, NULL);
2189 if (IS_ERR_OR_NULL(h)) {
2191 dev_dbg(priv->dev, "no ae handle yet, register notifier!\n");
2194 priv->ae_handle = h;
2196 ret = hns_nic_init_phy(ndev, h);
2198 dev_err(priv->dev, "probe phy device fail!\n");
2202 ret = hns_nic_init_ring_data(priv);
2205 goto out_init_ring_data;
2208 hns_nic_set_priv_ops(ndev);
2210 ret = register_netdev(ndev);
2212 dev_err(priv->dev, "probe register netdev fail!\n");
2213 goto out_reg_ndev_fail;
2218 hns_nic_uninit_ring_data(priv);
2219 priv->ring_data = NULL;
2222 hnae_put_handle(priv->ae_handle);
2223 priv->ae_handle = NULL;
2228 static int hns_nic_notifier_action(struct notifier_block *nb,
2229 unsigned long action, void *data)
2231 struct hns_nic_priv *priv =
2232 container_of(nb, struct hns_nic_priv, notifier_block);
2234 assert(action == HNAE_AE_REGISTER);
2236 if (!hns_nic_try_get_ae(priv->netdev)) {
2237 hnae_unregister_notifier(&priv->notifier_block);
2238 priv->notifier_block.notifier_call = NULL;
2243 static int hns_nic_dev_probe(struct platform_device *pdev)
2245 struct device *dev = &pdev->dev;
2246 struct net_device *ndev;
2247 struct hns_nic_priv *priv;
2251 ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
2255 platform_set_drvdata(pdev, ndev);
2257 priv = netdev_priv(ndev);
2259 priv->netdev = ndev;
2261 if (dev_of_node(dev)) {
2262 struct device_node *ae_node;
2264 if (of_device_is_compatible(dev->of_node,
2265 "hisilicon,hns-nic-v1"))
2266 priv->enet_ver = AE_VERSION_1;
2268 priv->enet_ver = AE_VERSION_2;
2270 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
2273 dev_err(dev, "cannot find ae-handle\n");
2274 goto out_read_prop_fail;
2276 priv->fwnode = &ae_node->fwnode;
2277 } else if (is_acpi_node(dev->fwnode)) {
2278 struct fwnode_reference_args args;
2280 if (acpi_dev_found(hns_enet_acpi_match[0].id))
2281 priv->enet_ver = AE_VERSION_1;
2282 else if (acpi_dev_found(hns_enet_acpi_match[1].id))
2283 priv->enet_ver = AE_VERSION_2;
2286 goto out_read_prop_fail;
2289 /* try to find port-idx-in-ae first */
2290 ret = acpi_node_get_property_reference(dev->fwnode,
2291 "ae-handle", 0, &args);
2293 dev_err(dev, "cannot find ae-handle\n");
2294 goto out_read_prop_fail;
2296 if (!is_acpi_device_node(args.fwnode)) {
2298 goto out_read_prop_fail;
2300 priv->fwnode = args.fwnode;
2302 dev_err(dev, "cannot read cfg data from OF or acpi\n");
2304 goto out_read_prop_fail;
2307 ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
2309 /* only for compatibility with old code */
2310 ret = device_property_read_u32(dev, "port-id", &port_id);
2312 goto out_read_prop_fail;
2313 /* for old dts, we need to calculate the port offset */
2314 port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
2315 : port_id - HNS_SRV_OFFSET;
2317 priv->port_id = port_id;
2319 hns_init_mac_addr(ndev);
2321 ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
2322 ndev->priv_flags |= IFF_UNICAST_FLT;
2323 ndev->netdev_ops = &hns_nic_netdev_ops;
2324 hns_ethtool_set_ops(ndev);
2326 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2327 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2329 ndev->vlan_features |=
2330 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
2331 ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
2333 /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
2334 ndev->min_mtu = MAC_MIN_MTU;
2335 switch (priv->enet_ver) {
2337 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
2338 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2339 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2340 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
2341 ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
2342 ndev->max_mtu = MAC_MAX_MTU_V2 -
2343 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2346 ndev->max_mtu = MAC_MAX_MTU -
2347 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
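/* max_mtu leaves room for the Ethernet header, FCS and one VLAN tag */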
2351 SET_NETDEV_DEV(ndev, dev);
2353 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
2354 dev_dbg(dev, "set mask to 64bit\n");
2356 dev_err(dev, "set mask to 64bit fail!\n");
2358 /* carrier off reporting is important to ethtool even BEFORE open */
2359 netif_carrier_off(ndev);
2361 timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
2362 INIT_WORK(&priv->service_task, hns_nic_service_task);
2364 set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
2365 clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2366 set_bit(NIC_STATE_DOWN, &priv->state);
2368 if (hns_nic_try_get_ae(priv->netdev)) {
2369 priv->notifier_block.notifier_call = hns_nic_notifier_action;
2370 ret = hnae_register_notifier(&priv->notifier_block);
2372 dev_err(dev, "register notifier fail!\n");
2373 goto out_notify_fail;
2375 dev_dbg(dev, "no ae handle yet, register notifier!\n");
2381 (void)cancel_work_sync(&priv->service_task);
2383 /* safe for ACPI FW */
2384 of_node_put(to_of_node(priv->fwnode));
2389 static int hns_nic_dev_remove(struct platform_device *pdev)
2391 struct net_device *ndev = platform_get_drvdata(pdev);
2392 struct hns_nic_priv *priv = netdev_priv(ndev);
2394 if (ndev->reg_state != NETREG_UNINITIALIZED)
2395 unregister_netdev(ndev);
2397 if (priv->ring_data)
2398 hns_nic_uninit_ring_data(priv);
2399 priv->ring_data = NULL;
2402 phy_disconnect(ndev->phydev);
2404 if (!IS_ERR_OR_NULL(priv->ae_handle))
2405 hnae_put_handle(priv->ae_handle);
2406 priv->ae_handle = NULL;
2407 if (priv->notifier_block.notifier_call)
2408 hnae_unregister_notifier(&priv->notifier_block);
2409 priv->notifier_block.notifier_call = NULL;
2411 set_bit(NIC_STATE_REMOVING, &priv->state);
2412 (void)cancel_work_sync(&priv->service_task);
2414 /* safe for ACPI FW */
2415 of_node_put(to_of_node(priv->fwnode));
2421 static const struct of_device_id hns_enet_of_match[] = {
2422 {.compatible = "hisilicon,hns-nic-v1",},
2423 {.compatible = "hisilicon,hns-nic-v2",},
2427 MODULE_DEVICE_TABLE(of, hns_enet_of_match);
2429 static struct platform_driver hns_nic_dev_driver = {
2432 .of_match_table = hns_enet_of_match,
2433 .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
2435 .probe = hns_nic_dev_probe,
2436 .remove = hns_nic_dev_remove,
2439 module_platform_driver(hns_nic_dev_driver);
2441 MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
2442 MODULE_AUTHOR("Hisilicon, Inc.");
2443 MODULE_LICENSE("GPL");
2444 MODULE_ALIAS("platform:hns-nic");