1/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
22#include <net/vxlan.h>
23
24#include "hnae3.h"
25#include "hns3_enet.h"
26
27const char hns3_driver_name[] = "hns3";
28const char hns3_driver_version[] = VERMAGIC_STRING;
29static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32static struct hnae3_client client;
33
34/* hns3_pci_tbl - PCI Device ID Table
35 *
36 * Last entry must be all 0s
37 *
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
40 */
41static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* required last entry */
50 {0, }
51};
52MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
53
54static irqreturn_t hns3_irq_handle(int irq, void *dev)
55{
56 struct hns3_enet_tqp_vector *tqp_vector = dev;
57
58 napi_schedule(&tqp_vector->napi);
59
60 return IRQ_HANDLED;
61}
62
63static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
64{
65 struct hns3_enet_tqp_vector *tqp_vectors;
66 unsigned int i;
67
68 for (i = 0; i < priv->vector_num; i++) {
69 tqp_vectors = &priv->tqp_vector[i];
70
71 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
72 continue;
73
74 /* release the irq resource */
75 free_irq(tqp_vectors->vector_irq, tqp_vectors);
76 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
77 }
78}
79
80static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
81{
82 struct hns3_enet_tqp_vector *tqp_vectors;
83 int txrx_int_idx = 0;
84 int rx_int_idx = 0;
85 int tx_int_idx = 0;
86 unsigned int i;
87 int ret;
88
89 for (i = 0; i < priv->vector_num; i++) {
90 tqp_vectors = &priv->tqp_vector[i];
91
92 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
93 continue;
94
95 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
96 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
97 "%s-%s-%d", priv->netdev->name, "TxRx",
98 txrx_int_idx++);
99 txrx_int_idx++;
100 } else if (tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "Rx",
103 rx_int_idx++);
104 } else if (tqp_vectors->tx_group.ring) {
105 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
106 "%s-%s-%d", priv->netdev->name, "Tx",
107 tx_int_idx++);
108 } else {
109 /* Skip this unused q_vector */
110 continue;
111 }
112
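 /* snprintf already limits the copy to HNAE3_INT_NAME_LEN - 1 bytes;
  * the explicit terminator below is just defensive.
  */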
113 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
114
115 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
116 tqp_vectors->name,
117 tqp_vectors);
118 if (ret) {
119 netdev_err(priv->netdev, "request irq(%d) fail\n",
120 tqp_vectors->vector_irq);
121 return ret;
122 }
123
124 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
125 }
126
127 return 0;
128}
129
130static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
131 u32 mask_en)
132{
133 writel(mask_en, tqp_vector->mask_addr);
134}
135
136static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
137{
138 napi_enable(&tqp_vector->napi);
139
140 /* enable vector */
141 hns3_mask_vector_irq(tqp_vector, 1);
142}
143
144static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
145{
146 /* disable vector */
147 hns3_mask_vector_irq(tqp_vector, 0);
148
149 disable_irq(tqp_vector->vector_irq);
150 napi_disable(&tqp_vector->napi);
151}
152
153static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
154 u32 gl_value)
155{
156 /* this defines the configuration for GL (Interrupt Gap Limiter)
157 * GL defines inter interrupt gap.
158 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
159 */
160 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
161 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
162 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
163}
164
165static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
166 u32 rl_value)
167{
168 /* this defines the configuration for RL (Interrupt Rate Limiter).
169 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
170 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
171 */
172 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
173}
174
175static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
176{
177 /* initialize the configuration for interrupt coalescing.
178 * 1. GL (Interrupt Gap Limiter)
179 * 2. RL (Interrupt Rate Limiter)
180 */
181
182 /* Default: enable interrupt coalescing */
183 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
184 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
185 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
186 /* for now we are disabling Interrupt RL - we
187 * will re-enable later
188 */
189 hns3_set_vector_coalesc_rl(tqp_vector, 0);
190 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
191 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
192}
193
194static int hns3_nic_net_up(struct net_device *netdev)
195{
196 struct hns3_nic_priv *priv = netdev_priv(netdev);
197 struct hnae3_handle *h = priv->ae_handle;
198 int i, j;
199 int ret;
200
201 /* get irq resource for all vectors */
202 ret = hns3_nic_init_irq(priv);
203 if (ret) {
204 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
205 return ret;
206 }
207
208 /* enable the vectors */
209 for (i = 0; i < priv->vector_num; i++)
210 hns3_vector_enable(&priv->tqp_vector[i]);
211
212 /* start the ae_dev */
213 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
214 if (ret)
215 goto out_start_err;
216
217 return 0;
218
219out_start_err:
220 for (j = i - 1; j >= 0; j--)
221 hns3_vector_disable(&priv->tqp_vector[j]);
222
223 hns3_nic_uninit_irq(priv);
224
225 return ret;
226}
227
228static int hns3_nic_net_open(struct net_device *netdev)
229{
230 struct hns3_nic_priv *priv = netdev_priv(netdev);
231 struct hnae3_handle *h = priv->ae_handle;
232 int ret;
233
234 netif_carrier_off(netdev);
235
236 ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
237 if (ret) {
238 netdev_err(netdev,
239 "netif_set_real_num_tx_queues fail, ret=%d!\n",
240 ret);
241 return ret;
242 }
243
244 ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
245 if (ret) {
246 netdev_err(netdev,
247 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
248 return ret;
249 }
250
251 ret = hns3_nic_net_up(netdev);
252 if (ret) {
253 netdev_err(netdev,
254 "hns net up fail, ret=%d!\n", ret);
255 return ret;
256 }
257
258 return 0;
259}
260
261static void hns3_nic_net_down(struct net_device *netdev)
262{
263 struct hns3_nic_priv *priv = netdev_priv(netdev);
264 const struct hnae3_ae_ops *ops;
265 int i;
266
267 /* stop ae_dev */
268 ops = priv->ae_handle->ae_algo->ops;
269 if (ops->stop)
270 ops->stop(priv->ae_handle);
271
272 /* disable vectors */
273 for (i = 0; i < priv->vector_num; i++)
274 hns3_vector_disable(&priv->tqp_vector[i]);
275
276 /* free irq resources */
277 hns3_nic_uninit_irq(priv);
278}
279
280static int hns3_nic_net_stop(struct net_device *netdev)
281{
282 netif_tx_stop_all_queues(netdev);
283 netif_carrier_off(netdev);
284
285 hns3_nic_net_down(netdev);
286
287 return 0;
288}
289
290void hns3_set_multicast_list(struct net_device *netdev)
291{
292 struct hns3_nic_priv *priv = netdev_priv(netdev);
293 struct hnae3_handle *h = priv->ae_handle;
294 struct netdev_hw_addr *ha = NULL;
295
296 if (h->ae_algo->ops->set_mc_addr) {
297 netdev_for_each_mc_addr(ha, netdev)
298 if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
299 netdev_err(netdev, "set multicast fail\n");
300 }
301}
302
303static int hns3_nic_uc_sync(struct net_device *netdev,
304 const unsigned char *addr)
305{
306 struct hns3_nic_priv *priv = netdev_priv(netdev);
307 struct hnae3_handle *h = priv->ae_handle;
308
309 if (h->ae_algo->ops->add_uc_addr)
310 return h->ae_algo->ops->add_uc_addr(h, addr);
311
312 return 0;
313}
314
315static int hns3_nic_uc_unsync(struct net_device *netdev,
316 const unsigned char *addr)
317{
318 struct hns3_nic_priv *priv = netdev_priv(netdev);
319 struct hnae3_handle *h = priv->ae_handle;
320
321 if (h->ae_algo->ops->rm_uc_addr)
322 return h->ae_algo->ops->rm_uc_addr(h, addr);
323
324 return 0;
325}
326
327static int hns3_nic_mc_sync(struct net_device *netdev,
328 const unsigned char *addr)
329{
330 struct hns3_nic_priv *priv = netdev_priv(netdev);
331 struct hnae3_handle *h = priv->ae_handle;
332
333 if (h->ae_algo->ops->add_mc_addr)
334 return h->ae_algo->ops->add_mc_addr(h, addr);
335
336 return 0;
337}
338
339static int hns3_nic_mc_unsync(struct net_device *netdev,
340 const unsigned char *addr)
341{
342 struct hns3_nic_priv *priv = netdev_priv(netdev);
343 struct hnae3_handle *h = priv->ae_handle;
344
345 if (h->ae_algo->ops->rm_mc_addr)
346 return h->ae_algo->ops->rm_mc_addr(h, addr);
347
348 return 0;
349}
350
351void hns3_nic_set_rx_mode(struct net_device *netdev)
352{
353 struct hns3_nic_priv *priv = netdev_priv(netdev);
354 struct hnae3_handle *h = priv->ae_handle;
355
356 if (h->ae_algo->ops->set_promisc_mode) {
357 if (netdev->flags & IFF_PROMISC)
358 h->ae_algo->ops->set_promisc_mode(h, 1);
359 else
360 h->ae_algo->ops->set_promisc_mode(h, 0);
361 }
362 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
363 netdev_err(netdev, "sync uc address fail\n");
364 if (netdev->flags & IFF_MULTICAST)
365 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
366 netdev_err(netdev, "sync mc address fail\n");
367}
368
369static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
370 u16 *mss, u32 *type_cs_vlan_tso)
371{
372 u32 l4_offset, hdr_len;
373 union l3_hdr_info l3;
374 union l4_hdr_info l4;
375 u32 l4_paylen;
376 int ret;
377
378 if (!skb_is_gso(skb))
379 return 0;
380
381 ret = skb_cow_head(skb, 0);
382 if (ret)
383 return ret;
384
385 l3.hdr = skb_network_header(skb);
386 l4.hdr = skb_transport_header(skb);
387
388 /* Software should clear the IPv4's checksum field when tso is
389 * needed.
390 */
391 if (l3.v4->version == 4)
392 l3.v4->check = 0;
393
394 /* tunnel packet.*/
395 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
396 SKB_GSO_GRE_CSUM |
397 SKB_GSO_UDP_TUNNEL |
398 SKB_GSO_UDP_TUNNEL_CSUM)) {
399 if ((!(skb_shinfo(skb)->gso_type &
400 SKB_GSO_PARTIAL)) &&
401 (skb_shinfo(skb)->gso_type &
402 SKB_GSO_UDP_TUNNEL_CSUM)) {
403 /* Software should clear the udp's checksum
404 * field when tso is needed.
405 */
406 l4.udp->check = 0;
407 }
408 /* reset l3&l4 pointers from outer to inner headers */
409 l3.hdr = skb_inner_network_header(skb);
410 l4.hdr = skb_inner_transport_header(skb);
411
412 /* Software should clear the IPv4's checksum field when
413 * tso is needed.
414 */
415 if (l3.v4->version == 4)
416 l3.v4->check = 0;
417 }
418
419 /* normal or tunnel packet*/
420 l4_offset = l4.hdr - skb->data;
421 hdr_len = (l4.tcp->doff * 4) + l4_offset;
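 /* total header length: bytes up to L4 plus the TCP header itself */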
422
423 /* remove payload length from inner pseudo checksum when tso*/
424 l4_paylen = skb->len - l4_offset;
425 csum_replace_by_diff(&l4.tcp->check,
426 (__force __wsum)htonl(l4_paylen));
427
428 /* find the txbd field values */
429 *paylen = skb->len - hdr_len;
430 hnae_set_bit(*type_cs_vlan_tso,
431 HNS3_TXD_TSO_B, 1);
432
433 /* get MSS for TSO */
434 *mss = skb_shinfo(skb)->gso_size;
435
436 return 0;
437}
438
439static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
440 u8 *il4_proto)
441{
442 union {
443 struct iphdr *v4;
444 struct ipv6hdr *v6;
445 unsigned char *hdr;
446 } l3;
447 unsigned char *l4_hdr;
448 unsigned char *exthdr;
449 u8 l4_proto_tmp;
450 __be16 frag_off;
451
452 /* find outer header point */
453 l3.hdr = skb_network_header(skb);
454 l4_hdr = skb_inner_transport_header(skb);
455
456 if (skb->protocol == htons(ETH_P_IPV6)) {
457 exthdr = l3.hdr + sizeof(*l3.v6);
458 l4_proto_tmp = l3.v6->nexthdr;
459 if (l4_hdr != exthdr)
460 ipv6_skip_exthdr(skb, exthdr - skb->data,
461 &l4_proto_tmp, &frag_off);
462 } else if (skb->protocol == htons(ETH_P_IP)) {
463 l4_proto_tmp = l3.v4->protocol;
464 }
465
466 *ol4_proto = l4_proto_tmp;
467
468 /* if not a tunnel packet, there is no inner L4 protocol */
469 if (!skb->encapsulation) {
470 *il4_proto = 0;
471 return;
472 }
473
474 /* find inner header point */
475 l3.hdr = skb_inner_network_header(skb);
476 l4_hdr = skb_inner_transport_header(skb);
477
478 if (l3.v6->version == 6) {
479 exthdr = l3.hdr + sizeof(*l3.v6);
480 l4_proto_tmp = l3.v6->nexthdr;
481 if (l4_hdr != exthdr)
482 ipv6_skip_exthdr(skb, exthdr - skb->data,
483 &l4_proto_tmp, &frag_off);
484 } else if (l3.v4->version == 4) {
485 l4_proto_tmp = l3.v4->protocol;
486 }
487
488 *il4_proto = l4_proto_tmp;
489}
490
491static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
492 u8 il4_proto, u32 *type_cs_vlan_tso,
493 u32 *ol_type_vlan_len_msec)
494{
495 union {
496 struct iphdr *v4;
497 struct ipv6hdr *v6;
498 unsigned char *hdr;
499 } l3;
500 union {
501 struct tcphdr *tcp;
502 struct udphdr *udp;
503 struct gre_base_hdr *gre;
504 unsigned char *hdr;
505 } l4;
506 unsigned char *l2_hdr;
507 u8 l4_proto = ol4_proto;
508 u32 ol2_len;
509 u32 ol3_len;
510 u32 ol4_len;
511 u32 l2_len;
512 u32 l3_len;
513
514 l3.hdr = skb_network_header(skb);
515 l4.hdr = skb_transport_header(skb);
516
517 /* compute L2 header size for normal packet, defined in 2 Bytes */
518 l2_len = l3.hdr - skb->data;
519 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
520 HNS3_TXD_L2LEN_S, l2_len >> 1);
521
522 /* tunnel packet*/
523 if (skb->encapsulation) {
524 /* compute OL2 header size, defined in 2 Bytes */
525 ol2_len = l2_len;
526 hnae_set_field(*ol_type_vlan_len_msec,
527 HNS3_TXD_L2LEN_M,
528 HNS3_TXD_L2LEN_S, ol2_len >> 1);
529
530 /* compute OL3 header size, defined in 4 Bytes */
531 ol3_len = l4.hdr - l3.hdr;
532 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
533 HNS3_TXD_L3LEN_S, ol3_len >> 2);
534
535 /* MAC in UDP, MAC in GRE (0x6558)*/
536 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
537 /* switch MAC header ptr from outer to inner header.*/
538 l2_hdr = skb_inner_mac_header(skb);
539
540 /* compute OL4 header size, defined in 4 Bytes. */
541 ol4_len = l2_hdr - l4.hdr;
542 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
543 HNS3_TXD_L4LEN_S, ol4_len >> 2);
544
545 /* switch IP header ptr from outer to inner header */
546 l3.hdr = skb_inner_network_header(skb);
547
548 /* compute inner l2 header size, defined in 2 Bytes. */
549 l2_len = l3.hdr - l2_hdr;
550 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
551 HNS3_TXD_L2LEN_S, l2_len >> 1);
552 } else {
553 /* skb packet types not supported by hardware;
554 * the txbd len field is not filled.
555 */
556 return;
557 }
558
559 /* switch L4 header pointer from outer to inner */
560 l4.hdr = skb_inner_transport_header(skb);
561
562 l4_proto = il4_proto;
563 }
564
565 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
566 l3_len = l4.hdr - l3.hdr;
567 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
568 HNS3_TXD_L3LEN_S, l3_len >> 2);
569
570 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
571 switch (l4_proto) {
572 case IPPROTO_TCP:
573 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
574 HNS3_TXD_L4LEN_S, l4.tcp->doff);
575 break;
576 case IPPROTO_SCTP:
577 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
578 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
579 break;
580 case IPPROTO_UDP:
581 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
583 break;
584 default:
585 /* skb packet types not supported by hardware;
586 * the txbd len field is not filled.
587 */
588 return;
589 }
590}
591
592static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
593 u8 il4_proto, u32 *type_cs_vlan_tso,
594 u32 *ol_type_vlan_len_msec)
595{
596 union {
597 struct iphdr *v4;
598 struct ipv6hdr *v6;
599 unsigned char *hdr;
600 } l3;
601 u32 l4_proto = ol4_proto;
602
603 l3.hdr = skb_network_header(skb);
604
605 /* define OL3 type and tunnel type(OL4).*/
606 if (skb->encapsulation) {
607 /* define outer network header type.*/
608 if (skb->protocol == htons(ETH_P_IP)) {
609 if (skb_is_gso(skb))
610 hnae_set_field(*ol_type_vlan_len_msec,
611 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
612 HNS3_OL3T_IPV4_CSUM);
613 else
614 hnae_set_field(*ol_type_vlan_len_msec,
615 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
616 HNS3_OL3T_IPV4_NO_CSUM);
617
618 } else if (skb->protocol == htons(ETH_P_IPV6)) {
619 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
620 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
621 }
622
623 /* define tunnel type(OL4).*/
624 switch (l4_proto) {
625 case IPPROTO_UDP:
626 hnae_set_field(*ol_type_vlan_len_msec,
627 HNS3_TXD_TUNTYPE_M,
628 HNS3_TXD_TUNTYPE_S,
629 HNS3_TUN_MAC_IN_UDP);
630 break;
631 case IPPROTO_GRE:
632 hnae_set_field(*ol_type_vlan_len_msec,
633 HNS3_TXD_TUNTYPE_M,
634 HNS3_TXD_TUNTYPE_S,
635 HNS3_TUN_NVGRE);
636 break;
637 default:
638 /* drop the skb if this tunnel type isn't supported by hardware,
639 * because hardware can't calculate the checksum when doing TSO.
640 */
641 if (skb_is_gso(skb))
642 return -EDOM;
643
644 /* the stack already computes the IP header checksum;
645 * the driver calculates the L4 checksum in software when not doing TSO.
646 */
647 skb_checksum_help(skb);
648 return 0;
649 }
650
651 l3.hdr = skb_inner_network_header(skb);
652 l4_proto = il4_proto;
653 }
654
655 if (l3.v4->version == 4) {
656 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
657 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
658
659 /* the stack computes the IP header already, the only time we
660 * need the hardware to recompute it is in the case of TSO.
661 */
662 if (skb_is_gso(skb))
663 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
664
665 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
666 } else if (l3.v6->version == 6) {
667 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
668 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
669 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
670 }
671
672 switch (l4_proto) {
673 case IPPROTO_TCP:
674 hnae_set_field(*type_cs_vlan_tso,
675 HNS3_TXD_L4T_M,
676 HNS3_TXD_L4T_S,
677 HNS3_L4T_TCP);
678 break;
679 case IPPROTO_UDP:
680 hnae_set_field(*type_cs_vlan_tso,
681 HNS3_TXD_L4T_M,
682 HNS3_TXD_L4T_S,
683 HNS3_L4T_UDP);
684 break;
685 case IPPROTO_SCTP:
686 hnae_set_field(*type_cs_vlan_tso,
687 HNS3_TXD_L4T_M,
688 HNS3_TXD_L4T_S,
689 HNS3_L4T_SCTP);
690 break;
691 default:
692 /* drop the skb if this tunnel type isn't supported by hardware,
693 * because hardware can't calculate the checksum when doing TSO.
694 */
695 if (skb_is_gso(skb))
696 return -EDOM;
697
698 /* the stack already computes the IP header checksum;
699 * the driver calculates the L4 checksum in software when not doing TSO.
700 */
701 skb_checksum_help(skb);
702 return 0;
703 }
704
705 return 0;
706}
707
708static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
709{
710 /* Config bd buffer end */
711 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
712 HNS3_TXD_BDTYPE_S, 0);
713 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
714 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
715 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
716}
717
718static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
719 int size, dma_addr_t dma, int frag_end,
720 enum hns_desc_type type)
721{
722 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
723 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
724 u32 ol_type_vlan_len_msec = 0;
725 u16 bdtp_fe_sc_vld_ra_ri = 0;
726 u32 type_cs_vlan_tso = 0;
727 struct sk_buff *skb;
728 u32 paylen = 0;
729 u16 mss = 0;
730 __be16 protocol;
731 u8 ol4_proto;
732 u8 il4_proto;
733 int ret;
734
735 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
736 desc_cb->priv = priv;
737 desc_cb->length = size;
738 desc_cb->dma = dma;
739 desc_cb->type = type;
740
741 /* now, fill the descriptor */
742 desc->addr = cpu_to_le64(dma);
743 desc->tx.send_size = cpu_to_le16((u16)size);
744 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
745 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
746
747 if (type == DESC_TYPE_SKB) {
748 skb = (struct sk_buff *)priv;
749 paylen = cpu_to_le16(skb->len);
750
751 if (skb->ip_summed == CHECKSUM_PARTIAL) {
752 skb_reset_mac_len(skb);
753 protocol = skb->protocol;
754
755 /* vlan packet*/
756 if (protocol == htons(ETH_P_8021Q)) {
757 protocol = vlan_get_protocol(skb);
758 skb->protocol = protocol;
759 }
760 hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
761 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
762 &type_cs_vlan_tso,
763 &ol_type_vlan_len_msec);
764 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
765 &type_cs_vlan_tso,
766 &ol_type_vlan_len_msec);
767 if (ret)
768 return ret;
769
770 ret = hns3_set_tso(skb, &paylen, &mss,
771 &type_cs_vlan_tso);
772 if (ret)
773 return ret;
774 }
775
776 /* Set txbd */
777 desc->tx.ol_type_vlan_len_msec =
778 cpu_to_le32(ol_type_vlan_len_msec);
779 desc->tx.type_cs_vlan_tso_len =
780 cpu_to_le32(type_cs_vlan_tso);
781 desc->tx.paylen = cpu_to_le16(paylen);
782 desc->tx.mss = cpu_to_le16(mss);
783 }
784
785 /* move ring pointer to next.*/
786 ring_ptr_move_fw(ring, next_to_use);
787
788 return 0;
789}
790
791static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
792 int size, dma_addr_t dma, int frag_end,
793 enum hns_desc_type type)
794{
795 unsigned int frag_buf_num;
796 unsigned int k;
797 int sizeoflast;
798 int ret;
799
800 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
801 sizeoflast = size % HNS3_MAX_BD_SIZE;
802 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
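 /* Every BD carries at most HNS3_MAX_BD_SIZE bytes, so the buffer is
  * split into frag_buf_num BDs and only the last one may be shorter.
  * Only the first BD of an skb keeps DESC_TYPE_SKB, so the skb is
  * released exactly once when the ring is cleaned.
  */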
803
804 /* When the frag size is bigger than hardware, split this frag */
805 for (k = 0; k < frag_buf_num; k++) {
806 ret = hns3_fill_desc(ring, priv,
807 (k == frag_buf_num - 1) ?
808 sizeoflast : HNS3_MAX_BD_SIZE,
809 dma + HNS3_MAX_BD_SIZE * k,
810 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
811 (type == DESC_TYPE_SKB && !k) ?
812 DESC_TYPE_SKB : DESC_TYPE_PAGE);
813 if (ret)
814 return ret;
815 }
816
817 return 0;
818}
819
820static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
821 struct hns3_enet_ring *ring)
822{
823 struct sk_buff *skb = *out_skb;
824 struct skb_frag_struct *frag;
825 int bdnum_for_frag;
826 int frag_num;
827 int buf_num;
828 int size;
829 int i;
830
831 size = skb_headlen(skb);
832 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
833
834 frag_num = skb_shinfo(skb)->nr_frags;
835 for (i = 0; i < frag_num; i++) {
836 frag = &skb_shinfo(skb)->frags[i];
837 size = skb_frag_size(frag);
838 bdnum_for_frag =
839 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
840 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
841 return -ENOMEM;
842
843 buf_num += bdnum_for_frag;
844 }
845
846 if (buf_num > ring_space(ring))
847 return -EBUSY;
848
849 *bnum = buf_num;
850 return 0;
851}
852
853static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
854 struct hns3_enet_ring *ring)
855{
856 struct sk_buff *skb = *out_skb;
857 int buf_num;
858
859 /* No. of segments (plus a header) */
860 buf_num = skb_shinfo(skb)->nr_frags + 1;
861
862 if (buf_num > ring_space(ring))
863 return -EBUSY;
864
865 *bnum = buf_num;
866
867 return 0;
868}
869
870static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
871{
872 struct device *dev = ring_to_dev(ring);
873 unsigned int i;
874
875 for (i = 0; i < ring->desc_num; i++) {
876 /* check if this is where we started */
877 if (ring->next_to_use == next_to_use_orig)
878 break;
879
880 /* unmap the descriptor dma address */
881 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
882 dma_unmap_single(dev,
883 ring->desc_cb[ring->next_to_use].dma,
884 ring->desc_cb[ring->next_to_use].length,
885 DMA_TO_DEVICE);
886 else
887 dma_unmap_page(dev,
888 ring->desc_cb[ring->next_to_use].dma,
889 ring->desc_cb[ring->next_to_use].length,
890 DMA_TO_DEVICE);
891
892 /* rollback one */
893 ring_ptr_move_bw(ring, next_to_use);
894 }
895}
896
897static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
898 struct net_device *netdev)
899{
900 struct hns3_nic_priv *priv = netdev_priv(netdev);
901 struct hns3_nic_ring_data *ring_data =
902 &tx_ring_data(priv, skb->queue_mapping);
903 struct hns3_enet_ring *ring = ring_data->ring;
904 struct device *dev = priv->dev;
905 struct netdev_queue *dev_queue;
906 struct skb_frag_struct *frag;
907 int next_to_use_head;
908 int next_to_use_frag;
909 dma_addr_t dma;
910 int buf_num;
911 int seg_num;
912 int size;
913 int ret;
914 int i;
915
916 /* Prefetch the data used later */
917 prefetch(skb->data);
918
919 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
920 case -EBUSY:
921 u64_stats_update_begin(&ring->syncp);
922 ring->stats.tx_busy++;
923 u64_stats_update_end(&ring->syncp);
924
925 goto out_net_tx_busy;
926 case -ENOMEM:
927 u64_stats_update_begin(&ring->syncp);
928 ring->stats.sw_err_cnt++;
929 u64_stats_update_end(&ring->syncp);
930 netdev_err(netdev, "no memory to xmit!\n");
931
932 goto out_err_tx_ok;
933 default:
934 break;
935 }
936
937 /* No. of segments (plus a header) */
938 seg_num = skb_shinfo(skb)->nr_frags + 1;
939 /* Fill the first part */
940 size = skb_headlen(skb);
941
942 next_to_use_head = ring->next_to_use;
943
944 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
945 if (dma_mapping_error(dev, dma)) {
946 netdev_err(netdev, "TX head DMA map failed\n");
947 ring->stats.sw_err_cnt++;
948 goto out_err_tx_ok;
949 }
950
951 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
952 DESC_TYPE_SKB);
953 if (ret)
954 goto head_dma_map_err;
955
956 next_to_use_frag = ring->next_to_use;
957 /* Fill the fragments */
958 for (i = 1; i < seg_num; i++) {
959 frag = &skb_shinfo(skb)->frags[i - 1];
960 size = skb_frag_size(frag);
961 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
962 if (dma_mapping_error(dev, dma)) {
963 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
964 ring->stats.sw_err_cnt++;
965 goto frag_dma_map_err;
966 }
967 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
968 seg_num - 1 == i ? 1 : 0,
969 DESC_TYPE_PAGE);
970
971 if (ret)
972 goto frag_dma_map_err;
973 }
974
975 /* Complete translate all packets */
976 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
977 netdev_tx_sent_queue(dev_queue, skb->len);
978
979 wmb(); /* Commit all data before submit */
980
981 hnae_queue_xmit(ring->tqp, buf_num);
982
983 return NETDEV_TX_OK;
984
985frag_dma_map_err:
986 hns_nic_dma_unmap(ring, next_to_use_frag);
987
988head_dma_map_err:
989 hns_nic_dma_unmap(ring, next_to_use_head);
990
991out_err_tx_ok:
992 dev_kfree_skb_any(skb);
993 return NETDEV_TX_OK;
994
995out_net_tx_busy:
996 netif_stop_subqueue(netdev, ring_data->queue_index);
997 smp_mb(); /* Commit all data before submit */
998
999 return NETDEV_TX_BUSY;
1000}
1001
1002static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1003{
1004 struct hns3_nic_priv *priv = netdev_priv(netdev);
1005 struct hnae3_handle *h = priv->ae_handle;
1006 struct sockaddr *mac_addr = p;
1007 int ret;
1008
1009 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1010 return -EADDRNOTAVAIL;
1011
1012 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1013 if (ret) {
1014 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1015 return ret;
1016 }
1017
1018 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1019
1020 return 0;
1021}
1022
1023static int hns3_nic_set_features(struct net_device *netdev,
1024 netdev_features_t features)
1025{
1026 struct hns3_nic_priv *priv = netdev_priv(netdev);
1027
1028 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1029 priv->ops.fill_desc = hns3_fill_desc_tso;
1030 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1031 } else {
1032 priv->ops.fill_desc = hns3_fill_desc;
1033 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1034 }
1035
1036 netdev->features = features;
1037 return 0;
1038}
1039
1040static void
1041hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1042{
1043 struct hns3_nic_priv *priv = netdev_priv(netdev);
1044 int queue_num = priv->ae_handle->kinfo.num_tqps;
1045 struct hns3_enet_ring *ring;
1046 unsigned int start;
1047 unsigned int idx;
1048 u64 tx_bytes = 0;
1049 u64 rx_bytes = 0;
1050 u64 tx_pkts = 0;
1051 u64 rx_pkts = 0;
1052
1053 for (idx = 0; idx < queue_num; idx++) {
1054 /* fetch the tx stats */
1055 ring = priv->ring_data[idx].ring;
1056 do {
 start = u64_stats_fetch_begin_irq(&ring->syncp);
1057 tx_bytes += ring->stats.tx_bytes;
1058 tx_pkts += ring->stats.tx_pkts;
1059 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1060
1061 /* fetch the rx stats */
1062 ring = priv->ring_data[idx + queue_num].ring;
1063 do {
 start = u64_stats_fetch_begin_irq(&ring->syncp);
1064 rx_bytes += ring->stats.rx_bytes;
1065 rx_pkts += ring->stats.rx_pkts;
1066 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1067 }
1068
1069 stats->tx_bytes = tx_bytes;
1070 stats->tx_packets = tx_pkts;
1071 stats->rx_bytes = rx_bytes;
1072 stats->rx_packets = rx_pkts;
1073
1074 stats->rx_errors = netdev->stats.rx_errors;
1075 stats->multicast = netdev->stats.multicast;
1076 stats->rx_length_errors = netdev->stats.rx_length_errors;
1077 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1078 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1079
1080 stats->tx_errors = netdev->stats.tx_errors;
1081 stats->rx_dropped = netdev->stats.rx_dropped;
1082 stats->tx_dropped = netdev->stats.tx_dropped;
1083 stats->collisions = netdev->stats.collisions;
1084 stats->rx_over_errors = netdev->stats.rx_over_errors;
1085 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1086 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1087 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1088 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1089 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1090 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1091 stats->tx_window_errors = netdev->stats.tx_window_errors;
1092 stats->rx_compressed = netdev->stats.rx_compressed;
1093 stats->tx_compressed = netdev->stats.tx_compressed;
1094}
1095
1096static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1097 enum hns3_udp_tnl_type type)
1098{
1099 struct hns3_nic_priv *priv = netdev_priv(netdev);
1100 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1101 struct hnae3_handle *h = priv->ae_handle;
1102
1103 if (udp_tnl->used && udp_tnl->dst_port == port) {
1104 udp_tnl->used++;
1105 return;
1106 }
1107
1108 if (udp_tnl->used) {
1109 netdev_warn(netdev,
1110 "UDP tunnel [%d], port [%d] offload\n", type, port);
1111 return;
1112 }
1113
1114 udp_tnl->dst_port = port;
1115 udp_tnl->used = 1;
1116 /* TBD send command to hardware to add port */
1117 if (h->ae_algo->ops->add_tunnel_udp)
1118 h->ae_algo->ops->add_tunnel_udp(h, port);
1119}
1120
1121static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1122 enum hns3_udp_tnl_type type)
1123{
1124 struct hns3_nic_priv *priv = netdev_priv(netdev);
1125 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1126 struct hnae3_handle *h = priv->ae_handle;
1127
1128 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1129 netdev_warn(netdev,
1130 "Invalid UDP tunnel port %d\n", port);
1131 return;
1132 }
1133
1134 udp_tnl->used--;
1135 if (udp_tnl->used)
1136 return;
1137
1138 udp_tnl->dst_port = 0;
1139 /* TBD send command to hardware to del port */
1140 if (h->ae_algo->ops->del_tunnel_udp)
1141 h->ae_algo->ops->del_tunnel_udp(h, port);
1142}
1143
1144/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1145 * @netdev: This physical port's netdev
1146 * @ti: Tunnel information
1147 */
1148static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1149 struct udp_tunnel_info *ti)
1150{
1151 u16 port_n = ntohs(ti->port);
1152
1153 switch (ti->type) {
1154 case UDP_TUNNEL_TYPE_VXLAN:
1155 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1156 break;
1157 case UDP_TUNNEL_TYPE_GENEVE:
1158 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1159 break;
1160 default:
1161 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1162 break;
1163 }
1164}
1165
1166static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1167 struct udp_tunnel_info *ti)
1168{
1169 u16 port_n = ntohs(ti->port);
1170
1171 switch (ti->type) {
1172 case UDP_TUNNEL_TYPE_VXLAN:
1173 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1174 break;
1175 case UDP_TUNNEL_TYPE_GENEVE:
1176 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1177 break;
1178 default:
1179 break;
1180 }
1181}
1182
1183static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1184{
1185 struct hns3_nic_priv *priv = netdev_priv(netdev);
1186 struct hnae3_handle *h = priv->ae_handle;
1187 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1188 unsigned int i;
1189 int ret;
1190
1191 if (tc > HNAE3_MAX_TC)
1192 return -EINVAL;
1193
1194 if (kinfo->num_tc == tc)
1195 return 0;
1196
1197 if (!netdev)
1198 return -EINVAL;
1199
1200 if (!tc) {
1201 netdev_reset_tc(netdev);
1202 return 0;
1203 }
1204
1205 /* Set num_tc for netdev */
1206 ret = netdev_set_num_tc(netdev, tc);
1207 if (ret)
1208 return ret;
1209
1210 /* Set per TC queues for the VSI */
1211 for (i = 0; i < HNAE3_MAX_TC; i++) {
1212 if (kinfo->tc_info[i].enable)
1213 netdev_set_tc_queue(netdev,
1214 kinfo->tc_info[i].tc,
1215 kinfo->tc_info[i].tqp_count,
1216 kinfo->tc_info[i].tqp_offset);
1217 }
1218
1219 return 0;
1220}
1221
1222static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1223 struct tc_to_netdev *tc)
1224{
1225 if (type != TC_SETUP_MQPRIO)
1226 return -EINVAL;
1227
1228 return hns3_setup_tc(dev, tc->mqprio->num_tc);
1229}
1230
1231static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1232 __be16 proto, u16 vid)
1233{
1234 struct hns3_nic_priv *priv = netdev_priv(netdev);
1235 struct hnae3_handle *h = priv->ae_handle;
1236 int ret = -EIO;
1237
1238 if (h->ae_algo->ops->set_vlan_filter)
1239 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1240
1241 return ret;
1242}
1243
1244static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1245 __be16 proto, u16 vid)
1246{
1247 struct hns3_nic_priv *priv = netdev_priv(netdev);
1248 struct hnae3_handle *h = priv->ae_handle;
1249 int ret = -EIO;
1250
1251 if (h->ae_algo->ops->set_vlan_filter)
1252 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1253
1254 return ret;
1255}
1256
1257static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1258 u8 qos, __be16 vlan_proto)
1259{
1260 struct hns3_nic_priv *priv = netdev_priv(netdev);
1261 struct hnae3_handle *h = priv->ae_handle;
1262 int ret = -EIO;
1263
1264 if (h->ae_algo->ops->set_vf_vlan_filter)
1265 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1266 qos, vlan_proto);
1267
1268 return ret;
1269}
1270
1271static const struct net_device_ops hns3_nic_netdev_ops = {
1272 .ndo_open = hns3_nic_net_open,
1273 .ndo_stop = hns3_nic_net_stop,
1274 .ndo_start_xmit = hns3_nic_net_xmit,
1275 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1276 .ndo_set_features = hns3_nic_set_features,
1277 .ndo_get_stats64 = hns3_nic_get_stats64,
1278 .ndo_setup_tc = hns3_nic_setup_tc,
1279 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1280 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1281 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1282 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1283 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1284 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1285};
1286
1287/* hns3_probe - Device initialization routine
1288 * @pdev: PCI device information struct
1289 * @ent: entry in hns3_pci_tbl
1290 *
1291 * hns3_probe initializes a PF identified by a pci_dev structure.
1292 * The OS initialization, configuring of the PF private structure,
1293 * and a hardware reset occur.
1294 *
1295 * Returns 0 on success, negative on failure
1296 */
1297static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1298{
1299 struct hnae3_ae_dev *ae_dev;
1300 int ret;
1301
1302 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1303 GFP_KERNEL);
1304 if (!ae_dev) {
1305 ret = -ENOMEM;
1306 return ret;
1307 }
1308
1309 ae_dev->pdev = pdev;
1310 ae_dev->dev_type = HNAE3_DEV_KNIC;
1311 pci_set_drvdata(pdev, ae_dev);
1312
1313 return hnae3_register_ae_dev(ae_dev);
1314}
1315
1316/* hns3_remove - Device removal routine
1317 * @pdev: PCI device information struct
1318 */
1319static void hns3_remove(struct pci_dev *pdev)
1320{
1321 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1322
1323 hnae3_unregister_ae_dev(ae_dev);
1324
1325 devm_kfree(&pdev->dev, ae_dev);
1326
1327 pci_set_drvdata(pdev, NULL);
1328}
1329
1330static struct pci_driver hns3_driver = {
1331 .name = hns3_driver_name,
1332 .id_table = hns3_pci_tbl,
1333 .probe = hns3_probe,
1334 .remove = hns3_remove,
1335};
1336
1337/* set default feature to hns3 */
1338static void hns3_set_default_feature(struct net_device *netdev)
1339{
1340 netdev->priv_flags |= IFF_UNICAST_FLT;
1341
1342 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1343 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1344 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1345 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1346 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1347
1348 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1349
1350 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1351
1352 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1353 NETIF_F_HW_VLAN_CTAG_FILTER |
1354 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1355 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1356 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1357 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1358
1359 netdev->vlan_features |=
1360 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1361 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1362 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1363 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1364 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1365
1366 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1367 NETIF_F_HW_VLAN_CTAG_FILTER |
1368 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1369 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1370 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1371 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1372}
1373
1374static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1375 struct hns3_desc_cb *cb)
1376{
1377 unsigned int order = hnae_page_order(ring);
1378 struct page *p;
1379
1380 p = dev_alloc_pages(order);
1381 if (!p)
1382 return -ENOMEM;
1383
1384 cb->priv = p;
1385 cb->page_offset = 0;
1386 cb->reuse_flag = 0;
1387 cb->buf = page_address(p);
1388 cb->length = hnae_page_size(ring);
1389 cb->type = DESC_TYPE_PAGE;
1390
1391 memset(cb->buf, 0, cb->length);
1392
1393 return 0;
1394}
1395
1396static void hns3_free_buffer(struct hns3_enet_ring *ring,
1397 struct hns3_desc_cb *cb)
1398{
1399 if (cb->type == DESC_TYPE_SKB)
1400 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1401 else if (!HNAE3_IS_TX_RING(ring))
1402 put_page((struct page *)cb->priv);
1403 memset(cb, 0, sizeof(*cb));
1404}
1405
1406static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1407{
1408 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1409 cb->length, ring_to_dma_dir(ring));
1410
1411 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1412 return -EIO;
1413
1414 return 0;
1415}
1416
1417static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1418 struct hns3_desc_cb *cb)
1419{
1420 if (cb->type == DESC_TYPE_SKB)
1421 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1422 ring_to_dma_dir(ring));
1423 else
1424 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1425 ring_to_dma_dir(ring));
1426}
1427
1428static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1429{
1430 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1431 ring->desc[i].addr = 0;
1432}
1433
1434static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1435{
1436 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1437
1438 if (!ring->desc_cb[i].dma)
1439 return;
1440
1441 hns3_buffer_detach(ring, i);
1442 hns3_free_buffer(ring, cb);
1443}
1444
1445static void hns3_free_buffers(struct hns3_enet_ring *ring)
1446{
1447 int i;
1448
1449 for (i = 0; i < ring->desc_num; i++)
1450 hns3_free_buffer_detach(ring, i);
1451}
1452
1453/* free desc along with its attached buffer */
1454static void hns3_free_desc(struct hns3_enet_ring *ring)
1455{
1456 hns3_free_buffers(ring);
1457
1458 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1459 ring->desc_num * sizeof(ring->desc[0]),
1460 DMA_BIDIRECTIONAL);
1461 ring->desc_dma_addr = 0;
1462 kfree(ring->desc);
1463 ring->desc = NULL;
1464}
1465
1466static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1467{
1468 int size = ring->desc_num * sizeof(ring->desc[0]);
1469
1470 ring->desc = kzalloc(size, GFP_KERNEL);
1471 if (!ring->desc)
1472 return -ENOMEM;
1473
1474 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1475 size, DMA_BIDIRECTIONAL);
1476 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1477 ring->desc_dma_addr = 0;
1478 kfree(ring->desc);
1479 ring->desc = NULL;
1480 return -ENOMEM;
1481 }
1482
1483 return 0;
1484}
1485
1486static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1487 struct hns3_desc_cb *cb)
1488{
1489 int ret;
1490
1491 ret = hns3_alloc_buffer(ring, cb);
1492 if (ret)
1493 goto out;
1494
1495 ret = hns3_map_buffer(ring, cb);
1496 if (ret)
1497 goto out_with_buf;
1498
1499 return 0;
1500
1501out_with_buf:
1502 hns3_free_buffer(ring, cb);
1503out:
1504 return ret;
1505}
1506
1507static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1508{
1509 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1510
1511 if (ret)
1512 return ret;
1513
1514 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1515
1516 return 0;
1517}
1518
1519/* Allocate memory for raw pkg, and map with dma */
1520static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1521{
1522 int i, j, ret;
1523
1524 for (i = 0; i < ring->desc_num; i++) {
1525 ret = hns3_alloc_buffer_attach(ring, i);
1526 if (ret)
1527 goto out_buffer_fail;
1528 }
1529
1530 return 0;
1531
1532out_buffer_fail:
1533 for (j = i - 1; j >= 0; j--)
1534 hns3_free_buffer_detach(ring, j);
1535 return ret;
1536}
1537
1538/* detach a in-used buffer and replace with a reserved one */
1539static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1540 struct hns3_desc_cb *res_cb)
1541{
1542 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1543 ring->desc_cb[i] = *res_cb;
1544 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1545}
1546
1547static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1548{
1549 ring->desc_cb[i].reuse_flag = 0;
1550 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1551 + ring->desc_cb[i].page_offset);
1552}
1553
1554static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1555 int *pkts)
1556{
1557 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1558
1559 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1560 (*bytes) += desc_cb->length;
1561 /* desc_cb will be cleaned after hns3_free_buffer_detach */
1562 hns3_free_buffer_detach(ring, ring->next_to_clean);
1563
1564 ring_ptr_move_fw(ring, next_to_clean);
1565}
1566
1567static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1568{
1569 int u = ring->next_to_use;
1570 int c = ring->next_to_clean;
1571
1572 if (unlikely(h > ring->desc_num))
1573 return 0;
1574
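 /* The reported head is valid only if it lies in the half-open
  * interval (next_to_clean, next_to_use], taking ring wrap-around
  * into account.
  */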
1575 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1576}
1577
1578int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1579{
1580 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1581 struct netdev_queue *dev_queue;
1582 int bytes, pkts;
1583 int head;
1584
1585 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1586 rmb(); /* Make sure head is ready before touch any data */
1587
1588 if (is_ring_empty(ring) || head == ring->next_to_clean)
1589 return 0; /* no data to poll */
1590
1591 if (!is_valid_clean_head(ring, head)) {
1592 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1593 ring->next_to_use, ring->next_to_clean);
1594
1595 u64_stats_update_begin(&ring->syncp);
1596 ring->stats.io_err_cnt++;
1597 u64_stats_update_end(&ring->syncp);
1598 return -EIO;
1599 }
1600
1601 bytes = 0;
1602 pkts = 0;
1603 while (head != ring->next_to_clean && budget) {
1604 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1605 /* Issue prefetch for next Tx descriptor */
1606 prefetch(&ring->desc_cb[ring->next_to_clean]);
1607 budget--;
1608 }
1609
1610 ring->tqp_vector->tx_group.total_bytes += bytes;
1611 ring->tqp_vector->tx_group.total_packets += pkts;
1612
1613 u64_stats_update_begin(&ring->syncp);
1614 ring->stats.tx_bytes += bytes;
1615 ring->stats.tx_pkts += pkts;
1616 u64_stats_update_end(&ring->syncp);
1617
1618 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1619 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1620
1621 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1622 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1623 /* Make sure that anybody stopping the queue after this
1624 * sees the new next_to_clean.
1625 */
1626 smp_mb();
1627 if (netif_tx_queue_stopped(dev_queue)) {
1628 netif_tx_wake_queue(dev_queue);
1629 ring->stats.restart_queue++;
1630 }
1631 }
1632
1633 return !!budget;
1634}
1635
1636static int hns3_desc_unused(struct hns3_enet_ring *ring)
1637{
1638 int ntc = ring->next_to_clean;
1639 int ntu = ring->next_to_use;
1640
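 /* number of descriptors that have been cleaned but not yet refilled */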
1641 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1642}
1643
1644static void
1645hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1646{
1647 struct hns3_desc_cb *desc_cb;
1648 struct hns3_desc_cb res_cbs;
1649 int i, ret;
1650
1651 for (i = 0; i < cleand_count; i++) {
1652 desc_cb = &ring->desc_cb[ring->next_to_use];
1653 if (desc_cb->reuse_flag) {
1654 u64_stats_update_begin(&ring->syncp);
1655 ring->stats.reuse_pg_cnt++;
1656 u64_stats_update_end(&ring->syncp);
1657
1658 hns3_reuse_buffer(ring, ring->next_to_use);
1659 } else {
1660 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1661 if (ret) {
1662 u64_stats_update_begin(&ring->syncp);
1663 ring->stats.sw_err_cnt++;
1664 u64_stats_update_end(&ring->syncp);
1665
1666 netdev_err(ring->tqp->handle->kinfo.netdev,
1667 "hnae reserve buffer map failed.\n");
1668 break;
1669 }
1670 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1671 }
1672
1673 ring_ptr_move_fw(ring, next_to_use);
1674 }
1675
1676 wmb(); /* Make sure all data has been written before submit */
1677 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1678}
1679
1680/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1681 * @data: pointer to the start of the headers
1682 * @max_size: total length of section to find headers in
1683 *
1684 * This function is meant to determine the length of headers that will
1685 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1686 * motivation of doing this is to only perform one pull for IPv4 TCP
1687 * packets so that we can do basic things like calculating the gso_size
1688 * based on the average data per packet.
1689 */
1690static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1691 unsigned int max_size)
1692{
1693 unsigned char *network;
1694 u8 hlen;
1695
1696 /* This should never happen, but better safe than sorry */
1697 if (max_size < ETH_HLEN)
1698 return max_size;
1699
1700 /* Initialize network frame pointer */
1701 network = data;
1702
1703 /* Set first protocol and move network header forward */
1704 network += ETH_HLEN;
1705
1706 /* Handle any vlan tag if present */
1707 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1708 == HNS3_RX_FLAG_VLAN_PRESENT) {
1709 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1710 return max_size;
1711
1712 network += VLAN_HLEN;
1713 }
1714
1715 /* Handle L3 protocols */
1716 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1717 == HNS3_RX_FLAG_L3ID_IPV4) {
1718 if ((typeof(max_size))(network - data) >
1719 (max_size - sizeof(struct iphdr)))
1720 return max_size;
1721
1722 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1723 hlen = (network[0] & 0x0F) << 2;
1724
1725 /* Verify hlen meets minimum size requirements */
1726 if (hlen < sizeof(struct iphdr))
1727 return network - data;
1728
1729 /* Record next protocol if header is present */
1730 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1731 == HNS3_RX_FLAG_L3ID_IPV6) {
1732 if ((typeof(max_size))(network - data) >
1733 (max_size - sizeof(struct ipv6hdr)))
1734 return max_size;
1735
1736 /* Record next protocol */
1737 hlen = sizeof(struct ipv6hdr);
1738 } else {
1739 return network - data;
1740 }
1741
1742 /* Relocate pointer to start of L4 header */
1743 network += hlen;
1744
1745 /* Finally sort out TCP/UDP */
1746 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1747 == HNS3_RX_FLAG_L4ID_TCP) {
1748 if ((typeof(max_size))(network - data) >
1749 (max_size - sizeof(struct tcphdr)))
1750 return max_size;
1751
1752 /* Access doff as a u8 to avoid unaligned access on ia64 */
1753 hlen = (network[12] & 0xF0) >> 2;
1754
1755 /* Verify hlen meets minimum size requirements */
1756 if (hlen < sizeof(struct tcphdr))
1757 return network - data;
1758
1759 network += hlen;
1760 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1761 == HNS3_RX_FLAG_L4ID_UDP) {
1762 if ((typeof(max_size))(network - data) >
1763 (max_size - sizeof(struct udphdr)))
1764 return max_size;
1765
1766 network += sizeof(struct udphdr);
1767 }
1768
1769 /* If everything has gone correctly network should be the
1770 * data section of the packet and will be the end of the header.
1771 * If not then it probably represents the end of the last recognized
1772 * header.
1773 */
1774 if ((typeof(max_size))(network - data) < max_size)
1775 return network - data;
1776 else
1777 return max_size;
1778}
1779
1780static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1781 struct hns3_enet_ring *ring, int pull_len,
1782 struct hns3_desc_cb *desc_cb)
1783{
1784 struct hns3_desc *desc;
1785 int truesize, size;
1786 int last_offset;
1787 bool twobufs;
1788
1789 twobufs = ((PAGE_SIZE < 8192) &&
1790 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
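 /* When the page is only big enough for two 2K buffers, reuse simply
  * flips page_offset between the two halves; otherwise the offset
  * walks forward through the page until it passes last_offset.
  */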
1791
1792 desc = &ring->desc[ring->next_to_clean];
1793 size = le16_to_cpu(desc->rx.size);
1794
1795 if (twobufs) {
1796 truesize = hnae_buf_size(ring);
1797 } else {
1798 truesize = ALIGN(size, L1_CACHE_BYTES);
1799 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1800 }
1801
1802 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1803 size - pull_len, truesize - pull_len);
1804
1805 /* Avoid re-using remote pages; by default the page is not reused */
1806 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1807 return;
1808
1809 if (twobufs) {
1810 /* If we are only owner of page we can reuse it */
1811 if (likely(page_count(desc_cb->priv) == 1)) {
1812 /* Flip page offset to other buffer */
1813 desc_cb->page_offset ^= truesize;
1814
1815 desc_cb->reuse_flag = 1;
1816 /* bump ref count on page before it is given*/
1817 get_page(desc_cb->priv);
1818 }
1819 return;
1820 }
1821
1822 /* Move offset up to the next cache line */
1823 desc_cb->page_offset += truesize;
1824
1825 if (desc_cb->page_offset <= last_offset) {
1826 desc_cb->reuse_flag = 1;
1827 /* Bump ref count on page before it is given*/
1828 get_page(desc_cb->priv);
1829 }
1830}
1831
1832static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1833 struct hns3_desc *desc)
1834{
1835 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1836 int l3_type, l4_type;
1837 u32 bd_base_info;
1838 int ol4_type;
1839 u32 l234info;
1840
1841 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1842 l234info = le32_to_cpu(desc->rx.l234_info);
1843
1844 skb->ip_summed = CHECKSUM_NONE;
1845
1846 skb_checksum_none_assert(skb);
1847
1848 if (!(netdev->features & NETIF_F_RXCSUM))
1849 return;
1850
1851 /* check if hardware has done checksum */
1852 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1853 return;
1854
1855 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1856 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1857 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1858 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1859 netdev_err(netdev, "L3/L4 error pkt\n");
1860 u64_stats_update_begin(&ring->syncp);
1861 ring->stats.l3l4_csum_err++;
1862 u64_stats_update_end(&ring->syncp);
1863
1864 return;
1865 }
1866
1867 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1868 HNS3_RXD_L3ID_S);
1869 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1870 HNS3_RXD_L4ID_S);
1871
1872 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1873 switch (ol4_type) {
1874 case HNS3_OL4_TYPE_MAC_IN_UDP:
1875 case HNS3_OL4_TYPE_NVGRE:
1876 skb->csum_level = 1;
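 /* fall through: the inner checksum state below also applies */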
1877 case HNS3_OL4_TYPE_NO_TUN:
1878 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1879 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1880 (l3_type == HNS3_L3_TYPE_IPV6 &&
1881 (l4_type == HNS3_L4_TYPE_UDP ||
1882 l4_type == HNS3_L4_TYPE_TCP ||
1883 l4_type == HNS3_L4_TYPE_SCTP)))
1884 skb->ip_summed = CHECKSUM_UNNECESSARY;
1885 break;
1886 }
1887}
1888
1889static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1890 struct sk_buff **out_skb, int *out_bnum)
1891{
1892 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1893 struct hns3_desc_cb *desc_cb;
1894 struct hns3_desc *desc;
1895 struct sk_buff *skb;
1896 unsigned char *va;
1897 u32 bd_base_info;
1898 int pull_len;
1899 u32 l234info;
1900 int length;
1901 int bnum;
1902
1903 desc = &ring->desc[ring->next_to_clean];
1904 desc_cb = &ring->desc_cb[ring->next_to_clean];
1905
1906 prefetch(desc);
1907
1908 length = le16_to_cpu(desc->rx.pkt_len);
1909 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1910 l234info = le32_to_cpu(desc->rx.l234_info);
1911
1912 /* Check valid BD */
1913 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1914 return -EFAULT;
1915
1916 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1917
1918 /* Prefetch first cache line of first page
1919 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
1920 * line size is 64B so need to prefetch twice to make it 128B. But in
1921 * actual we can have greater size of caches with 128B Level 1 cache
1922 * lines. In such a case, single fetch would suffice to cache in the
1923 * relevant part of the header.
1924 */
1925 prefetch(va);
1926#if L1_CACHE_BYTES < 128
1927 prefetch(va + L1_CACHE_BYTES);
1928#endif
1929
1930 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1931 HNS3_RX_HEAD_SIZE);
1932 if (unlikely(!skb)) {
1933 netdev_err(netdev, "alloc rx skb fail\n");
1934
1935 u64_stats_update_begin(&ring->syncp);
1936 ring->stats.sw_err_cnt++;
1937 u64_stats_update_end(&ring->syncp);
1938
1939 return -ENOMEM;
1940 }
1941
1942 prefetchw(skb->data);
1943
1944 bnum = 1;
1945 if (length <= HNS3_RX_HEAD_SIZE) {
1946 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1947
1948 /* We can reuse buffer as-is, just make sure it is local */
1949 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1950 desc_cb->reuse_flag = 1;
1951 else /* This page cannot be reused so discard it */
1952 put_page(desc_cb->priv);
1953
1954 ring_ptr_move_fw(ring, next_to_clean);
1955 } else {
1956 u64_stats_update_begin(&ring->syncp);
1957 ring->stats.seg_pkt_cnt++;
1958 u64_stats_update_end(&ring->syncp);
1959
1960 pull_len = hns3_nic_get_headlen(va, l234info,
1961 HNS3_RX_HEAD_SIZE);
1962 memcpy(__skb_put(skb, pull_len), va,
1963 ALIGN(pull_len, sizeof(long)));
1964
1965 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
1966 ring_ptr_move_fw(ring, next_to_clean);
1967
1968 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1969 desc = &ring->desc[ring->next_to_clean];
1970 desc_cb = &ring->desc_cb[ring->next_to_clean];
1971 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1972 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
1973 ring_ptr_move_fw(ring, next_to_clean);
1974 bnum++;
1975 }
1976 }
1977
1978 *out_bnum = bnum;
1979
1980 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
1981 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
1982 ((u64 *)desc)[0], ((u64 *)desc)[1]);
1983 u64_stats_update_begin(&ring->syncp);
1984 ring->stats.non_vld_descs++;
1985 u64_stats_update_end(&ring->syncp);
1986
1987 dev_kfree_skb_any(skb);
1988 return -EINVAL;
1989 }
1990
1991 if (unlikely((!desc->rx.pkt_len) ||
1992 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
1993 netdev_err(netdev, "truncated pkt\n");
1994 u64_stats_update_begin(&ring->syncp);
1995 ring->stats.err_pkt_len++;
1996 u64_stats_update_end(&ring->syncp);
1997
1998 dev_kfree_skb_any(skb);
1999 return -EFAULT;
2000 }
2001
2002 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2003 netdev_err(netdev, "L2 error pkt\n");
2004 u64_stats_update_begin(&ring->syncp);
2005 ring->stats.l2_err++;
2006 u64_stats_update_end(&ring->syncp);
2007
2008 dev_kfree_skb_any(skb);
2009 return -EFAULT;
2010 }
2011
2012 u64_stats_update_begin(&ring->syncp);
2013 ring->stats.rx_pkts++;
2014 ring->stats.rx_bytes += skb->len;
2015 u64_stats_update_end(&ring->syncp);
2016
2017 ring->tqp_vector->rx_group.total_bytes += skb->len;
2018
2019 hns3_rx_checksum(ring, skb, desc);
2020 return 0;
2021}
2022
2023static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2024{
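/* Refill RX buffers once at least this many descriptors have been consumed,
 * so buffer allocation is batched instead of being done per packet.
 */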
2025#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2026 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2027 int recv_pkts, recv_bds, clean_count, err;
2028 int unused_count = hns3_desc_unused(ring);
2029 struct sk_buff *skb = NULL;
2030 int num, bnum = 0;
2031
2032 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2033 rmb(); /* Make sure num is read before any descriptor data is touched */
2034
2035 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2036 num -= unused_count;
2037
2038 while (recv_pkts < budget && recv_bds < num) {
2039 /* Reuse or realloc buffers */
2040 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2041 hns3_nic_alloc_rx_buffers(ring,
2042 clean_count + unused_count);
2043 clean_count = 0;
2044 unused_count = hns3_desc_unused(ring);
2045 }
2046
2047 /* Poll one pkt */
2048 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2049 if (unlikely(!skb)) /* Unrecoverable failure, stop polling */
2050 goto out;
2051
2052 recv_bds += bnum;
2053 clean_count += bnum;
2054 if (unlikely(err)) { /* Skip packets with errors */
2055 recv_pkts++;
2056 continue;
2057 }
2058
2059 /* Hand the packet up to the network stack */
2060 skb->protocol = eth_type_trans(skb, netdev);
2061 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2062
2063 recv_pkts++;
2064 }
2065
2066out:
2067 /* Give back the cleaned and unused descriptors before returning */
2068 if (clean_count + unused_count > 0)
2069 hns3_nic_alloc_rx_buffers(ring,
2070 clean_count + unused_count);
2071
2072 return recv_pkts;
2073}
2074
2075static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2076{
2077#define HNS3_RX_ULTRA_PACKET_RATE 40000
2078 enum hns3_flow_level_range new_flow_level;
2079 struct hns3_enet_tqp_vector *tqp_vector;
2080 int packets_per_secs;
2081 int bytes_per_usecs;
2082 u16 new_int_gl;
2083 int usecs;
2084
2085 if (!ring_group->int_gl)
2086 return false;
2087
2088 if (ring_group->total_packets == 0) {
2089 ring_group->int_gl = HNS3_INT_GL_50K;
2090 ring_group->flow_level = HNS3_FLOW_LOW;
2091 return true;
2092 }
2093
2094 /* Simple throttle rate management:
2095 * 0-10 MB/s low (50000 ints/s)
2096 * 10-20 MB/s mid (20000 ints/s)
2097 * 20-1249 MB/s high (18000 ints/s)
2098 * > 40000 pps ultra (8000 ints/s)
2099 */
2100 new_flow_level = ring_group->flow_level;
2101 new_int_gl = ring_group->int_gl;
2102 tqp_vector = ring_group->ring->tqp_vector;
2103 usecs = (ring_group->int_gl << 1);
2104 bytes_per_usecs = ring_group->total_bytes / usecs;
2105 /* 1000000 usecs per second */
2106 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
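 /* The GL value appears to be in units of 2 usecs, hence the shift by
 * one above. Illustrative example only: int_gl = 10 gives usecs = 20;
 * with total_bytes = 500 this yields bytes_per_usecs = 25 (above the
 * 20 MB/s threshold below), and total_packets = 1 yields
 * packets_per_secs = 50000, which exceeds HNS3_RX_ULTRA_PACKET_RATE.
 */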
2107
2108 switch (new_flow_level) {
2109 case HNS3_FLOW_LOW:
2110 if (bytes_per_usecs > 10)
2111 new_flow_level = HNS3_FLOW_MID;
2112 break;
2113 case HNS3_FLOW_MID:
2114 if (bytes_per_usecs > 20)
2115 new_flow_level = HNS3_FLOW_HIGH;
2116 else if (bytes_per_usecs <= 10)
2117 new_flow_level = HNS3_FLOW_LOW;
2118 break;
2119 case HNS3_FLOW_HIGH:
2120 case HNS3_FLOW_ULTRA:
2121 default:
2122 if (bytes_per_usecs <= 20)
2123 new_flow_level = HNS3_FLOW_MID;
2124 break;
2125 }
2126
2127 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2128 (&tqp_vector->rx_group == ring_group))
2129 new_flow_level = HNS3_FLOW_ULTRA;
2130
2131 switch (new_flow_level) {
2132 case HNS3_FLOW_LOW:
2133 new_int_gl = HNS3_INT_GL_50K;
2134 break;
2135 case HNS3_FLOW_MID:
2136 new_int_gl = HNS3_INT_GL_20K;
2137 break;
2138 case HNS3_FLOW_HIGH:
2139 new_int_gl = HNS3_INT_GL_18K;
2140 break;
2141 case HNS3_FLOW_ULTRA:
2142 new_int_gl = HNS3_INT_GL_8K;
2143 break;
2144 default:
2145 break;
2146 }
2147
2148 ring_group->total_bytes = 0;
2149 ring_group->total_packets = 0;
2150 ring_group->flow_level = new_flow_level;
2151 if (new_int_gl != ring_group->int_gl) {
2152 ring_group->int_gl = new_int_gl;
2153 return true;
2154 }
2155 return false;
2156}
2157
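/* When both the RX and TX groups of a vector want a new GL value, program the
 * vector with the larger of the two and keep both groups' GL and flow level
 * in sync, since both groups share one interrupt.
 */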
2158static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2159{
2160 u16 rx_int_gl, tx_int_gl;
2161 bool rx, tx;
2162
2163 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2164 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2165 rx_int_gl = tqp_vector->rx_group.int_gl;
2166 tx_int_gl = tqp_vector->tx_group.int_gl;
2167 if (rx && tx) {
2168 if (rx_int_gl > tx_int_gl) {
2169 tqp_vector->tx_group.int_gl = rx_int_gl;
2170 tqp_vector->tx_group.flow_level =
2171 tqp_vector->rx_group.flow_level;
2172 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2173 } else {
2174 tqp_vector->rx_group.int_gl = tx_int_gl;
2175 tqp_vector->rx_group.flow_level =
2176 tqp_vector->tx_group.flow_level;
2177 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2178 }
2179 }
2180}
2181
2182static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2183{
2184 struct hns3_enet_ring *ring;
2185 int rx_pkt_total = 0;
2186
2187 struct hns3_enet_tqp_vector *tqp_vector =
2188 container_of(napi, struct hns3_enet_tqp_vector, napi);
2189 bool clean_complete = true;
2190 int rx_budget;
2191
2192 /* Since the actual Tx work is minimal, we can give the Tx a larger
2193 * budget and be more aggressive about cleaning up the Tx descriptors.
2194 */
2195 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2196 if (!hns3_clean_tx_ring(ring, budget))
2197 clean_complete = false;
2198 }
2199
2200 /* make sure rx ring budget not smaller than 1 */
2201 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2202
2203 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2204 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2205
2206 if (rx_cleaned >= rx_budget)
2207 clean_complete = false;
2208
2209 rx_pkt_total += rx_cleaned;
2210 }
2211
2212 tqp_vector->rx_group.total_packets += rx_pkt_total;
2213
2214 if (!clean_complete)
2215 return budget;
2216
2217 napi_complete(napi);
2218 hns3_update_new_int_gl(tqp_vector);
2219 hns3_mask_vector_irq(tqp_vector, 1);
2220
2221 return rx_pkt_total;
2222}
2223
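/* Build the ring chain for this vector: a linked list of ring-chain nodes,
 * TX rings first and then RX rings, which the AE layer uses to map every
 * ring serviced by the vector to its interrupt.
 */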
2224static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2225 struct hnae3_ring_chain_node *head)
2226{
2227 struct pci_dev *pdev = tqp_vector->handle->pdev;
2228 struct hnae3_ring_chain_node *cur_chain = head;
2229 struct hnae3_ring_chain_node *chain;
2230 struct hns3_enet_ring *tx_ring;
2231 struct hns3_enet_ring *rx_ring;
2232
2233 tx_ring = tqp_vector->tx_group.ring;
2234 if (tx_ring) {
2235 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2236 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2237 HNAE3_RING_TYPE_TX);
2238
2239 cur_chain->next = NULL;
2240
2241 while (tx_ring->next) {
2242 tx_ring = tx_ring->next;
2243
2244 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2245 GFP_KERNEL);
2246 if (!chain)
2247 return -ENOMEM;
2248
2249 cur_chain->next = chain;
2250 chain->tqp_index = tx_ring->tqp->tqp_index;
2251 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2252 HNAE3_RING_TYPE_TX);
2253
2254 cur_chain = chain;
2255 }
2256 }
2257
2258 rx_ring = tqp_vector->rx_group.ring;
2259 if (!tx_ring && rx_ring) {
2260 cur_chain->next = NULL;
2261 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2262 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2263 HNAE3_RING_TYPE_RX);
2264
2265 rx_ring = rx_ring->next;
2266 }
2267
2268 while (rx_ring) {
2269 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2270 if (!chain)
2271 return -ENOMEM;
2272
2273 cur_chain->next = chain;
2274 chain->tqp_index = rx_ring->tqp->tqp_index;
2275 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2276 HNAE3_RING_TYPE_RX);
2277 cur_chain = chain;
2278
2279 rx_ring = rx_ring->next;
2280 }
2281
2282 return 0;
2283}
2284
2285static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2286 struct hnae3_ring_chain_node *head)
2287{
2288 struct pci_dev *pdev = tqp_vector->handle->pdev;
2289 struct hnae3_ring_chain_node *chain_tmp, *chain;
2290
2291 chain = head->next;
2292
2293 while (chain) {
2294 chain_tmp = chain->next;
2295 devm_kfree(&pdev->dev, chain);
2296 chain = chain_tmp;
2297 }
2298}
2299
2300static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2301 struct hns3_enet_ring *ring)
2302{
2303 ring->next = group->ring;
2304 group->ring = ring;
2305
2306 group->count++;
2307}
2308
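/* Allocate one TQP vector per online CPU (capped at the number of TQPs),
 * distribute the TX and RX rings over the vectors round-robin, register a
 * NAPI context for each vector and ask the AE layer to map each vector's
 * ring chain to its interrupt.
 */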
2309static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2310{
2311 struct hnae3_ring_chain_node vector_ring_chain;
2312 struct hnae3_handle *h = priv->ae_handle;
2313 struct hns3_enet_tqp_vector *tqp_vector;
2314 struct hnae3_vector_info *vector;
2315 struct pci_dev *pdev = h->pdev;
2316 u16 tqp_num = h->kinfo.num_tqps;
2317 u16 vector_num;
2318 int ret = 0;
2319 u16 i;
2320
2321 /* RSS size, the number of online CPUs and vector_num should match. */
2322 /* 2P/4P (multi-socket) systems should be considered later. */
2323 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2324 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2325 GFP_KERNEL);
2326 if (!vector)
2327 return -ENOMEM;
2328
2329 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2330
2331 priv->vector_num = vector_num;
2332 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2333 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2334 GFP_KERNEL);
2335 if (!priv->tqp_vector)
2336 return -ENOMEM;
2337
2338 for (i = 0; i < tqp_num; i++) {
2339 u16 vector_i = i % vector_num;
2340
2341 tqp_vector = &priv->tqp_vector[vector_i];
2342
2343 hns3_add_ring_to_group(&tqp_vector->tx_group,
2344 priv->ring_data[i].ring);
2345
2346 hns3_add_ring_to_group(&tqp_vector->rx_group,
2347 priv->ring_data[i + tqp_num].ring);
2348
2349 tqp_vector->idx = vector_i;
2350 tqp_vector->mask_addr = vector[vector_i].io_addr;
2351 tqp_vector->vector_irq = vector[vector_i].vector;
2352 tqp_vector->num_tqps++;
2353
2354 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2355 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2356 }
2357
2358 for (i = 0; i < vector_num; i++) {
2359 tqp_vector = &priv->tqp_vector[i];
2360
2361 tqp_vector->rx_group.total_bytes = 0;
2362 tqp_vector->rx_group.total_packets = 0;
2363 tqp_vector->tx_group.total_bytes = 0;
2364 tqp_vector->tx_group.total_packets = 0;
2365 hns3_vector_gl_rl_init(tqp_vector);
2366 tqp_vector->handle = h;
2367
2368 ret = hns3_get_vector_ring_chain(tqp_vector,
2369 &vector_ring_chain);
2370 if (ret)
2371 goto out;
2372
2373 ret = h->ae_algo->ops->map_ring_to_vector(h,
2374 tqp_vector->vector_irq, &vector_ring_chain);
2375 if (ret)
2376 goto out;
2377
2378 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2379
2380 netif_napi_add(priv->netdev, &tqp_vector->napi,
2381 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2382 }
2383
2384out:
2385 devm_kfree(&pdev->dev, vector);
2386 return ret;
2387}
2388
2389static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2390{
2391 struct hnae3_ring_chain_node vector_ring_chain;
2392 struct hnae3_handle *h = priv->ae_handle;
2393 struct hns3_enet_tqp_vector *tqp_vector;
2394 struct pci_dev *pdev = h->pdev;
2395 int i, ret;
2396
2397 for (i = 0; i < priv->vector_num; i++) {
2398 tqp_vector = &priv->tqp_vector[i];
2399
2400 ret = hns3_get_vector_ring_chain(tqp_vector,
2401 &vector_ring_chain);
2402 if (ret)
2403 return ret;
2404
2405 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2406 tqp_vector->vector_irq, &vector_ring_chain);
2407 if (ret)
2408 return ret;
2409
2410 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2411
2412 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2413 (void)irq_set_affinity_hint(
2414 priv->tqp_vector[i].vector_irq,
2415 NULL);
2416 devm_free_irq(&pdev->dev,
2417 priv->tqp_vector[i].vector_irq,
2418 &priv->tqp_vector[i]);
2419 }
2420
2421 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2422
2423 netif_napi_del(&priv->tqp_vector[i].napi);
2424 }
2425
2426 devm_kfree(&pdev->dev, priv->tqp_vector);
2427
2428 return 0;
2429}
2430
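/* Allocate and initialize the software ring for one queue. TX rings occupy
 * ring_data[0 .. num_tqps - 1] and RX rings occupy
 * ring_data[num_tqps .. 2 * num_tqps - 1], so a TQP's TX and RX rings are
 * num_tqps entries apart.
 */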
2431static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2432 int ring_type)
2433{
2434 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2435 int queue_num = priv->ae_handle->kinfo.num_tqps;
2436 struct pci_dev *pdev = priv->ae_handle->pdev;
2437 struct hns3_enet_ring *ring;
2438
2439 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2440 if (!ring)
2441 return -ENOMEM;
2442
2443 if (ring_type == HNAE3_RING_TYPE_TX) {
2444 ring_data[q->tqp_index].ring = ring;
2445 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2446 } else {
2447 ring_data[q->tqp_index + queue_num].ring = ring;
2448 ring->io_base = q->io_base;
2449 }
2450
2451 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2452
2453 ring_data[q->tqp_index].queue_index = q->tqp_index;
2454
2455 ring->tqp = q;
2456 ring->desc = NULL;
2457 ring->desc_cb = NULL;
2458 ring->dev = priv->dev;
2459 ring->desc_dma_addr = 0;
2460 ring->buf_size = q->buf_size;
2461 ring->desc_num = q->desc_num;
2462 ring->next_to_use = 0;
2463 ring->next_to_clean = 0;
2464
2465 return 0;
2466}
2467
2468static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2469 struct hns3_nic_priv *priv)
2470{
2471 int ret;
2472
2473 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2474 if (ret)
2475 return ret;
2476
2477 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2478 if (ret)
2479 return ret;
2480
2481 return 0;
2482}
2483
2484static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2485{
2486 struct hnae3_handle *h = priv->ae_handle;
2487 struct pci_dev *pdev = h->pdev;
2488 int i, ret;
2489
2490 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2491 sizeof(*priv->ring_data) * 2,
2492 GFP_KERNEL);
2493 if (!priv->ring_data)
2494 return -ENOMEM;
2495
2496 for (i = 0; i < h->kinfo.num_tqps; i++) {
2497 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2498 if (ret)
2499 goto err;
2500 }
2501
2502 return 0;
2503err:
2504 devm_kfree(&pdev->dev, priv->ring_data);
2505 return ret;
2506}
2507
2508static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2509{
2510 int ret;
2511
2512 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2513 return -EINVAL;
2514
2515 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2516 GFP_KERNEL);
2517 if (!ring->desc_cb) {
2518 ret = -ENOMEM;
2519 goto out;
2520 }
2521
2522 ret = hns3_alloc_desc(ring);
2523 if (ret)
2524 goto out_with_desc_cb;
2525
2526 if (!HNAE3_IS_TX_RING(ring)) {
2527 ret = hns3_alloc_ring_buffers(ring);
2528 if (ret)
2529 goto out_with_desc;
2530 }
2531
2532 return 0;
2533
2534out_with_desc:
2535 hns3_free_desc(ring);
2536out_with_desc_cb:
2537 kfree(ring->desc_cb);
2538 ring->desc_cb = NULL;
2539out:
2540 return ret;
2541}
2542
2543static void hns3_fini_ring(struct hns3_enet_ring *ring)
2544{
2545 hns3_free_desc(ring);
2546 kfree(ring->desc_cb);
2547 ring->desc_cb = NULL;
2548 ring->next_to_clean = 0;
2549 ring->next_to_use = 0;
2550}
2551
2552int hns3_buf_size2type(u32 buf_size)
2553{
2554 int bd_size_type;
2555
2556 switch (buf_size) {
2557 case 512:
2558 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2559 break;
2560 case 1024:
2561 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2562 break;
2563 case 2048:
2564 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2565 break;
2566 case 4096:
2567 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2568 break;
2569 default:
2570 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2571 }
2572
2573 return bd_size_type;
2574}
2575
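/* Program the ring's DMA base address, buffer size type and BD number into
 * the hardware registers of its queue.
 */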
2576static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2577{
2578 dma_addr_t dma = ring->desc_dma_addr;
2579 struct hnae3_queue *q = ring->tqp;
2580
2581 if (!HNAE3_IS_TX_RING(ring)) {
2582 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2583 (u32)dma);
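 /* The high word is written with two shifts so that a 32-bit
 * dma_addr_t is never shifted by its full width, which would be
 * undefined behaviour in C.
 */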
2584 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2585 (u32)((dma >> 31) >> 1));
2586
2587 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2588 hns3_buf_size2type(ring->buf_size));
2589 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2590 ring->desc_num / 8 - 1);
2591
2592 } else {
2593 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2594 (u32)dma);
2595 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2596 (u32)((dma >> 31) >> 1));
2597
2598 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2599 hns3_buf_size2type(ring->buf_size));
2600 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2601 ring->desc_num / 8 - 1);
2602 }
2603}
2604
2605static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2606{
2607 struct hnae3_handle *h = priv->ae_handle;
2608 int ring_num = h->kinfo.num_tqps * 2;
2609 int i, j;
2610 int ret;
2611
2612 for (i = 0; i < ring_num; i++) {
2613 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2614 if (ret) {
2615 dev_err(priv->dev,
2616 "Alloc ring memory fail! ret=%d\n", ret);
2617 goto out_when_alloc_ring_memory;
2618 }
2619
2620 hns3_init_ring_hw(priv->ring_data[i].ring);
2621
2622 u64_stats_init(&priv->ring_data[i].ring->syncp);
2623 }
2624
2625 return 0;
2626
2627out_when_alloc_ring_memory:
2628 for (j = i - 1; j >= 0; j--)
2629 hns3_fini_ring(priv->ring_data[j].ring);
2630
2631 return -ENOMEM;
2632}
2633
2634static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2635{
2636 struct hnae3_handle *h = priv->ae_handle;
2637 int i;
2638
2639 for (i = 0; i < h->kinfo.num_tqps; i++) {
2640 if (h->ae_algo->ops->reset_queue)
2641 h->ae_algo->ops->reset_queue(h, i);
2642
2643 hns3_fini_ring(priv->ring_data[i].ring);
2644 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2645 }
2646
2647 return 0;
2648}
2649
2650/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
2651static void hns3_init_mac_addr(struct net_device *netdev)
2652{
2653 struct hns3_nic_priv *priv = netdev_priv(netdev);
2654 struct hnae3_handle *h = priv->ae_handle;
2655 u8 mac_addr_temp[ETH_ALEN];
2656
2657 if (h->ae_algo->ops->get_mac_addr) {
2658 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2659 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2660 }
2661
2662 /* Check if the MAC address is valid, if not get a random one */
2663 if (!is_valid_ether_addr(netdev->dev_addr)) {
2664 eth_hw_addr_random(netdev);
2665 dev_warn(priv->dev, "using random MAC address %pM\n",
2666 netdev->dev_addr);
2667 /* Also copy this new MAC address into hdev */
2668 if (h->ae_algo->ops->set_mac_addr)
2669 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2670 }
2671}
2672
2673static void hns3_nic_set_priv_ops(struct net_device *netdev)
2674{
2675 struct hns3_nic_priv *priv = netdev_priv(netdev);
2676
2677 if ((netdev->features & NETIF_F_TSO) ||
2678 (netdev->features & NETIF_F_TSO6)) {
2679 priv->ops.fill_desc = hns3_fill_desc_tso;
2680 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2681 } else {
2682 priv->ops.fill_desc = hns3_fill_desc;
2683 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2684 }
2685}
2686
2687static int hns3_client_init(struct hnae3_handle *handle)
2688{
2689 struct pci_dev *pdev = handle->pdev;
2690 struct hns3_nic_priv *priv;
2691 struct net_device *netdev;
2692 int ret;
2693
2694 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2695 handle->kinfo.num_tqps);
2696 if (!netdev)
2697 return -ENOMEM;
2698
2699 priv = netdev_priv(netdev);
2700 priv->dev = &pdev->dev;
2701 priv->netdev = netdev;
2702 priv->ae_handle = handle;
2703
2704 handle->kinfo.netdev = netdev;
2705 handle->priv = (void *)priv;
2706
2707 hns3_init_mac_addr(netdev);
2708
2709 hns3_set_default_feature(netdev);
2710
2711 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2712 netdev->priv_flags |= IFF_UNICAST_FLT;
2713 netdev->netdev_ops = &hns3_nic_netdev_ops;
2714 SET_NETDEV_DEV(netdev, &pdev->dev);
2715 hns3_ethtool_set_ops(netdev);
2716 hns3_nic_set_priv_ops(netdev);
2717
2718 /* Carrier off reporting is important to ethtool even BEFORE open */
2719 netif_carrier_off(netdev);
2720
2721 ret = hns3_get_ring_config(priv);
2722 if (ret) {
2723 ret = -ENOMEM;
2724 goto out_get_ring_cfg;
2725 }
2726
2727 ret = hns3_nic_init_vector_data(priv);
2728 if (ret) {
2729 ret = -ENOMEM;
2730 goto out_init_vector_data;
2731 }
2732
2733 ret = hns3_init_all_ring(priv);
2734 if (ret) {
2735 ret = -ENOMEM;
2736 goto out_init_ring_data;
2737 }
2738
2739 ret = register_netdev(netdev);
2740 if (ret) {
2741 dev_err(priv->dev, "probe register netdev fail!\n");
2742 goto out_reg_netdev_fail;
2743 }
2744
2745 return ret;
2746
2747out_reg_netdev_fail:
2748out_init_ring_data:
2749 (void)hns3_nic_uninit_vector_data(priv);
2750 priv->ring_data = NULL;
2751out_init_vector_data:
2752out_get_ring_cfg:
2753 priv->ae_handle = NULL;
2754 free_netdev(netdev);
2755 return ret;
2756}
2757
2758static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2759{
2760 struct net_device *netdev = handle->kinfo.netdev;
2761 struct hns3_nic_priv *priv = netdev_priv(netdev);
2762 int ret;
2763
2764 if (netdev->reg_state != NETREG_UNINITIALIZED)
2765 unregister_netdev(netdev);
2766
2767 ret = hns3_nic_uninit_vector_data(priv);
2768 if (ret)
2769 netdev_err(netdev, "uninit vector error\n");
2770
2771 ret = hns3_uninit_all_ring(priv);
2772 if (ret)
2773 netdev_err(netdev, "uninit ring error\n");
2774
2775 priv->ring_data = NULL;
2776
2777 free_netdev(netdev);
2778}
2779
2780static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2781{
2782 struct net_device *netdev = handle->kinfo.netdev;
2783
2784 if (!netdev)
2785 return;
2786
2787 if (linkup) {
2788 netif_carrier_on(netdev);
2789 netif_tx_wake_all_queues(netdev);
2790 netdev_info(netdev, "link up\n");
2791 } else {
2792 netif_carrier_off(netdev);
2793 netif_tx_stop_all_queues(netdev);
2794 netdev_info(netdev, "link down\n");
2795 }
2796}
2797
2798const struct hnae3_client_ops client_ops = {
2799 .init_instance = hns3_client_init,
2800 .uninit_instance = hns3_client_uninit,
2801 .link_status_change = hns3_link_status_change,
2802};
2803
2804/* hns3_init_module - Driver registration routine
2805 * hns3_init_module is the first routine called when the driver is
2806 * loaded. It registers the HNAE3 client and the PCI driver.
2807 */
2808static int __init hns3_init_module(void)
2809{
2810 int ret;
2811
2812 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2813 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2814
2815 client.type = HNAE3_CLIENT_KNIC;
2816 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2817 hns3_driver_name);
2818
2819 client.ops = &client_ops;
2820
2821 ret = hnae3_register_client(&client);
2822 if (ret)
2823 return ret;
2824
2825 ret = pci_register_driver(&hns3_driver);
2826 if (ret)
2827 hnae3_unregister_client(&client);
2828
2829 return ret;
2830}
2831module_init(hns3_init_module);
2832
2833/* hns3_exit_module - Driver exit cleanup routine
2834 * hns3_exit_module is called just before the driver is removed
2835 * from memory.
2836 */
2837static void __exit hns3_exit_module(void)
2838{
2839 pci_unregister_driver(&hns3_driver);
2840 hnae3_unregister_client(&client);
2841}
2842module_exit(hns3_exit_module);
2843
2844MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2845MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2846MODULE_LICENSE("GPL");
2847MODULE_ALIAS("pci:hns-nic");