/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
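
/* Total L2-to-L4 header length of an skb: the offset from the MAC header
 * to the transport header plus the TCP header itself. Used below to
 * compute the TSO payload length as skb->len - SKB_TMP_LEN(skb).
 */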
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
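
/* Fill one v2 TX buffer descriptor: program the DMA address and size, the
 * buffer-number and port-id fields and, for CHECKSUM_PARTIAL skbs, the
 * L3/L4 checksum-offload and TSO fields derived from the packet headers,
 * then advance next_to_use.
 */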
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct ipv6hdr *ipv6hdr;

	desc_cb->length = size;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&

					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}

			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}
static const struct acpi_device_id hns_enet_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}
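
/* Back out the most recently filled descriptor; used on the TX error
 * path when DMA mapping of a later fragment fails.
 */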
static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
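
/* TSO variant of the stop check: count the descriptors a GSO skb needs,
 * splitting every buffer into BD_MAX_SEND_SIZE chunks; if the packet
 * would exceed the per-packet descriptor limit, linearize it with
 * skb_copy() so it fits in a single buffer.
 */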
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than the hardware can send, split it */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}
int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* finished translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}
/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
	    == HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
	    == HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		   == HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
	    == HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		   == HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}
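
/* Reuse an RX page buffer when possible: attach the fragment to the skb;
 * if this node owns the page and it is the only user, flip or advance
 * page_offset and take an extra reference so the ring can hand the other
 * half (or the next chunk) back to the hardware without a fresh
 * allocation.
 */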
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid reusing remote pages; the flag defaults to no reuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
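
/* The v2 hardware reports the buffer count as (bnum - 1) in the RX
 * descriptor, while v1 reports bnum directly; these helpers hide that
 * difference.
 */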
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to
	 * bad checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check error */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check error */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception path: free the skb and skip over the descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the rx checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all buffers are written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}
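
/* Count the descriptors currently owned by software, i.e. how many slots
 * between next_to_use and next_to_clean can be refilled with fresh
 * buffers.
 */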
static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
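
/* Poll up to @budget received packets from one RX ring: refill buffers
 * in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE, build one skb per packet and
 * hand it to @v, the per-ring receive callback.
 */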
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num has taken effect before other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip over the erroneous descriptors */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all buffers are written back before refilling */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* workaround for a hardware bug: re-check the FBD count */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
		return false;
	}

	return true;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	return num == 0;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
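
/* Check that the head index reported by hardware lies in the half-open
 * interval (next_to_clean, next_to_use], taking ring wraparound into
 * account; anything else means the hardware reported a bogus head.
 */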
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ring);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ring);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}

	return pkts;
}
static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
		return false;
	}

	return true;
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);

	return head == ring->next_to_clean;
}
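
/* Reclaim every outstanding TX buffer on the ring (up to next_to_use)
 * and reset the BQL queue state; used when the port is brought down.
 */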
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = ring->next_to_use; /* ntu: ring position set by software */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}
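
/* Ring interrupt handler: mask further interrupts on this ring and hand
 * processing off to NAPI.
 */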
static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}
/**
 * hns_nic_adjust_link - adjust the network mode by PHY status or new
 * parameters
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}
/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 *
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (ret)
		return ret;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}
static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}
void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}
/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}
static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different IRQ balancing between 16-core and 32-core machines.
	 * The cpu mask is set by ring index according to the ring flag,
	 * which indicates whether the ring is a tx or an rx ring.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;
}
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}
static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}
void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}
static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < priv->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
#define HNS_LB_TX_RING	0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be a TCP/IP packet */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}
static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* adjust the link: speed and duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait for the hardware to become ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}
/**
 * hns_nic_clear_all_rx_fetch - clear the descriptors the chip has
 * fetched. The function works as follows:
 * 1. if one rx ring finds that page_offset is not equal to 0 between
 *    head and tail, it means the chip fetched the wrong descriptors for
 *    the ring whose buffer size is 4096.
 * 2. we set the chip serdes loopback and set rss indirection to the ring.
 * 3. construct 64-byte IP broadcast packets, then wait for the associated
 *    rx ring to receive all the packets and fetch new descriptors.
 * 4. recover to the original state.
 *
 * @ndev: net device
 */
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* for each rx ring, clear the descriptors the chip has fetched */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);

		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		for (j = 0; j < indir_size / sizeof(*org_indir); j++)
			cur_indir[j] = i;
		ops->set_rss(h, cur_indir, NULL, 0);

		for (j = 0; j < fetch_num; j++) {
			/* alloc one skb and init */
			skb = hns_assemble_skb(ndev);

			rd = &tx_ring_data(priv, skb->queue_mapping);
			hns_nic_net_xmit_hw(ndev, skb, rd);
		}

		retry_times = 0;
		while (retry_times++ < 10) {
			mdelay(10);
			/* clean rx */
			rd = &rx_ring_data(priv, i);
			if (rd->poll_one(rd, fetch_num,
					 hns_nic_drop_rx_fetch))
				break;
		}

		retry_times = 0;
		while (retry_times++ < 10) {
			mdelay(10);
			/* clean the packets sent on tx ring 0 */
			rd = &tx_ring_data(priv,
					   HNS_LB_TX_RING);
			if (rd->poll_one(rd, fetch_num, NULL))
				break;
		}
	}

	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	/* MTU no change */
	if (new_mtu == ndev->mtu)
		return 0;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (if_running) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);
	}

	if (priv->enet_ver != AE_VERSION_1 &&
	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
	    new_mtu > BD_SIZE_2048_MAX_MTU) {

		hnae_reinit_all_ring_desc(h);

		/* clear the packets the chip has fetched */
		ret = hns_nic_clear_all_rx_fetch(ndev);

		/* the page offset must be consistent with the desc */
		hnae_reinit_all_ring_page_off(h);

		if (ret) {
			netdev_err(ndev, "clear the fetched desc fail\n");
			goto out;
		}
	}

	ret = h->dev->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(ndev, "set mtu fail, return value %d\n",
			   ret);

	if (!ret)
		/* finally, set new mtu to netdevice */
		ndev->mtu = new_mtu;

out:
	if (if_running) {
		if (hns_nic_net_open(ndev)) {
			netdev_err(ndev, "hns net open fail\n");
			ret = -EINVAL;
		}
	}

	return ret;
}
static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip supports only 7 * 4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}

	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}

	return features;
}
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}
/**
 * hns_set_multicast_list - set multicast MAC addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}
void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}
static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;

	return fallback(ndev, skb);
}
static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}
/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}
/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}
/* mark the service task complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
	/* make sure to commit the things */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}
static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}
/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
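
/* priv->ring_data is laid out as 2 * q_num entries: entries [0, q_num)
 * are TX rings, entries [q_num, 2 * q_num) are the corresponding RX
 * rings.
 */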
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip supports only 7 * 4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso at init;
		 * control tso on/off through the TSE bit in the bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for compatibility with old code */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}
static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}
static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");