// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"

#define hns3_set_field(origin, shift, val)   ((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
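
/* One skb fragment may need several BDs; e.g. assuming HNS3_MAX_BD_SIZE
 * is 65535, a 100000-byte linear part needs hns3_tx_bd_count(100000) =
 * DIV_ROUND_UP(100000, 65535) = 2 descriptors.
 */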

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)
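
/* Despite its "fmt" parameter name, callers pass the net_device as the
 * first argument, which is forwarded straight to netdev_err(); a typical
 * (hypothetical) call site would be:
 *	hns3_rl_err(netdev, "failed to map buffer: %d\n", ret);
 */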

static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);

static const char hns3_driver_name[] = "hns3";
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");

static bool page_pool_enabled = true;
module_param(page_pool_enabled, bool, 0400);

#define HNS3_SGL_SIZE(nfrag)	(sizeof(struct scatterlist) * (nfrag) +	\
				 sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE	ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
				      dma_get_cache_alignment())

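/* The spare-buffer SGL region sized above holds one sg_table header
 * followed by up to HNS3_MAX_TSO_BD_NUM scatterlist entries (the tx path
 * is expected to point sgt->sgl just past the header), rounded up to the
 * DMA cache alignment.
 */
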
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			   NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

#define HNS3_MIN_TX_LEN		33U
#define HNS3_MIN_TUN_PKT_LEN	65U

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0,}
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
	{	ptype, \
		l, \
		CHECKSUM_##s, \
		HNS3_L3_TYPE_##t, \
		1 }

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
		{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }

static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
	HNS3_RX_PTYPE_UNUSED_ENTRY(0),
	HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
	HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
	HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
	HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
	HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
	HNS3_RX_PTYPE_UNUSED_ENTRY(9),
	HNS3_RX_PTYPE_UNUSED_ENTRY(10),
	HNS3_RX_PTYPE_UNUSED_ENTRY(11),
	HNS3_RX_PTYPE_UNUSED_ENTRY(12),
	HNS3_RX_PTYPE_UNUSED_ENTRY(13),
	HNS3_RX_PTYPE_UNUSED_ENTRY(14),
	HNS3_RX_PTYPE_UNUSED_ENTRY(15),
	HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
	HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
	HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
	HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(26),
	HNS3_RX_PTYPE_UNUSED_ENTRY(27),
	HNS3_RX_PTYPE_UNUSED_ENTRY(28),
	HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(38),
	HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_UNUSED_ENTRY(46),
	HNS3_RX_PTYPE_UNUSED_ENTRY(47),
	HNS3_RX_PTYPE_UNUSED_ENTRY(48),
	HNS3_RX_PTYPE_UNUSED_ENTRY(49),
	HNS3_RX_PTYPE_UNUSED_ENTRY(50),
	HNS3_RX_PTYPE_UNUSED_ENTRY(51),
	HNS3_RX_PTYPE_UNUSED_ENTRY(52),
	HNS3_RX_PTYPE_UNUSED_ENTRY(53),
	HNS3_RX_PTYPE_UNUSED_ENTRY(54),
	HNS3_RX_PTYPE_UNUSED_ENTRY(55),
	HNS3_RX_PTYPE_UNUSED_ENTRY(56),
	HNS3_RX_PTYPE_UNUSED_ENTRY(57),
	HNS3_RX_PTYPE_UNUSED_ENTRY(58),
	HNS3_RX_PTYPE_UNUSED_ENTRY(59),
	HNS3_RX_PTYPE_UNUSED_ENTRY(60),
	HNS3_RX_PTYPE_UNUSED_ENTRY(61),
	HNS3_RX_PTYPE_UNUSED_ENTRY(62),
	HNS3_RX_PTYPE_UNUSED_ENTRY(63),
	HNS3_RX_PTYPE_UNUSED_ENTRY(64),
	HNS3_RX_PTYPE_UNUSED_ENTRY(65),
	HNS3_RX_PTYPE_UNUSED_ENTRY(66),
	HNS3_RX_PTYPE_UNUSED_ENTRY(67),
	HNS3_RX_PTYPE_UNUSED_ENTRY(68),
	HNS3_RX_PTYPE_UNUSED_ENTRY(69),
	HNS3_RX_PTYPE_UNUSED_ENTRY(70),
	HNS3_RX_PTYPE_UNUSED_ENTRY(71),
	HNS3_RX_PTYPE_UNUSED_ENTRY(72),
	HNS3_RX_PTYPE_UNUSED_ENTRY(73),
	HNS3_RX_PTYPE_UNUSED_ENTRY(74),
	HNS3_RX_PTYPE_UNUSED_ENTRY(75),
	HNS3_RX_PTYPE_UNUSED_ENTRY(76),
	HNS3_RX_PTYPE_UNUSED_ENTRY(77),
	HNS3_RX_PTYPE_UNUSED_ENTRY(78),
	HNS3_RX_PTYPE_UNUSED_ENTRY(79),
	HNS3_RX_PTYPE_UNUSED_ENTRY(80),
	HNS3_RX_PTYPE_UNUSED_ENTRY(81),
	HNS3_RX_PTYPE_UNUSED_ENTRY(82),
	HNS3_RX_PTYPE_UNUSED_ENTRY(83),
	HNS3_RX_PTYPE_UNUSED_ENTRY(84),
	HNS3_RX_PTYPE_UNUSED_ENTRY(85),
	HNS3_RX_PTYPE_UNUSED_ENTRY(86),
	HNS3_RX_PTYPE_UNUSED_ENTRY(87),
	HNS3_RX_PTYPE_UNUSED_ENTRY(88),
	HNS3_RX_PTYPE_UNUSED_ENTRY(89),
	HNS3_RX_PTYPE_UNUSED_ENTRY(90),
	HNS3_RX_PTYPE_UNUSED_ENTRY(91),
	HNS3_RX_PTYPE_UNUSED_ENTRY(92),
	HNS3_RX_PTYPE_UNUSED_ENTRY(93),
	HNS3_RX_PTYPE_UNUSED_ENTRY(94),
	HNS3_RX_PTYPE_UNUSED_ENTRY(95),
	HNS3_RX_PTYPE_UNUSED_ENTRY(96),
	HNS3_RX_PTYPE_UNUSED_ENTRY(97),
	HNS3_RX_PTYPE_UNUSED_ENTRY(98),
	HNS3_RX_PTYPE_UNUSED_ENTRY(99),
	HNS3_RX_PTYPE_UNUSED_ENTRY(100),
	HNS3_RX_PTYPE_UNUSED_ENTRY(101),
	HNS3_RX_PTYPE_UNUSED_ENTRY(102),
	HNS3_RX_PTYPE_UNUSED_ENTRY(103),
	HNS3_RX_PTYPE_UNUSED_ENTRY(104),
	HNS3_RX_PTYPE_UNUSED_ENTRY(105),
	HNS3_RX_PTYPE_UNUSED_ENTRY(106),
	HNS3_RX_PTYPE_UNUSED_ENTRY(107),
	HNS3_RX_PTYPE_UNUSED_ENTRY(108),
	HNS3_RX_PTYPE_UNUSED_ENTRY(109),
	HNS3_RX_PTYPE_UNUSED_ENTRY(110),
	HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
	HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
	HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
	HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_UNUSED_ENTRY(120),
	HNS3_RX_PTYPE_UNUSED_ENTRY(121),
	HNS3_RX_PTYPE_UNUSED_ENTRY(122),
	HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
	HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
	HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
	HNS3_RX_PTYPE_UNUSED_ENTRY(132),
	HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
	HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
	HNS3_RX_PTYPE_UNUSED_ENTRY(140),
	HNS3_RX_PTYPE_UNUSED_ENTRY(141),
	HNS3_RX_PTYPE_UNUSED_ENTRY(142),
	HNS3_RX_PTYPE_UNUSED_ENTRY(143),
	HNS3_RX_PTYPE_UNUSED_ENTRY(144),
	HNS3_RX_PTYPE_UNUSED_ENTRY(145),
	HNS3_RX_PTYPE_UNUSED_ENTRY(146),
	HNS3_RX_PTYPE_UNUSED_ENTRY(147),
	HNS3_RX_PTYPE_UNUSED_ENTRY(148),
	HNS3_RX_PTYPE_UNUSED_ENTRY(149),
	HNS3_RX_PTYPE_UNUSED_ENTRY(150),
	HNS3_RX_PTYPE_UNUSED_ENTRY(151),
	HNS3_RX_PTYPE_UNUSED_ENTRY(152),
	HNS3_RX_PTYPE_UNUSED_ENTRY(153),
	HNS3_RX_PTYPE_UNUSED_ENTRY(154),
	HNS3_RX_PTYPE_UNUSED_ENTRY(155),
	HNS3_RX_PTYPE_UNUSED_ENTRY(156),
	HNS3_RX_PTYPE_UNUSED_ENTRY(157),
	HNS3_RX_PTYPE_UNUSED_ENTRY(158),
	HNS3_RX_PTYPE_UNUSED_ENTRY(159),
	HNS3_RX_PTYPE_UNUSED_ENTRY(160),
	HNS3_RX_PTYPE_UNUSED_ENTRY(161),
	HNS3_RX_PTYPE_UNUSED_ENTRY(162),
	HNS3_RX_PTYPE_UNUSED_ENTRY(163),
	HNS3_RX_PTYPE_UNUSED_ENTRY(164),
	HNS3_RX_PTYPE_UNUSED_ENTRY(165),
	HNS3_RX_PTYPE_UNUSED_ENTRY(166),
	HNS3_RX_PTYPE_UNUSED_ENTRY(167),
	HNS3_RX_PTYPE_UNUSED_ENTRY(168),
	HNS3_RX_PTYPE_UNUSED_ENTRY(169),
	HNS3_RX_PTYPE_UNUSED_ENTRY(170),
	HNS3_RX_PTYPE_UNUSED_ENTRY(171),
	HNS3_RX_PTYPE_UNUSED_ENTRY(172),
	HNS3_RX_PTYPE_UNUSED_ENTRY(173),
	HNS3_RX_PTYPE_UNUSED_ENTRY(174),
	HNS3_RX_PTYPE_UNUSED_ENTRY(175),
	HNS3_RX_PTYPE_UNUSED_ENTRY(176),
	HNS3_RX_PTYPE_UNUSED_ENTRY(177),
	HNS3_RX_PTYPE_UNUSED_ENTRY(178),
	HNS3_RX_PTYPE_UNUSED_ENTRY(179),
	HNS3_RX_PTYPE_UNUSED_ENTRY(180),
	HNS3_RX_PTYPE_UNUSED_ENTRY(181),
	HNS3_RX_PTYPE_UNUSED_ENTRY(182),
	HNS3_RX_PTYPE_UNUSED_ENTRY(183),
	HNS3_RX_PTYPE_UNUSED_ENTRY(184),
	HNS3_RX_PTYPE_UNUSED_ENTRY(185),
	HNS3_RX_PTYPE_UNUSED_ENTRY(186),
	HNS3_RX_PTYPE_UNUSED_ENTRY(187),
	HNS3_RX_PTYPE_UNUSED_ENTRY(188),
	HNS3_RX_PTYPE_UNUSED_ENTRY(189),
	HNS3_RX_PTYPE_UNUSED_ENTRY(190),
	HNS3_RX_PTYPE_UNUSED_ENTRY(191),
	HNS3_RX_PTYPE_UNUSED_ENTRY(192),
	HNS3_RX_PTYPE_UNUSED_ENTRY(193),
	HNS3_RX_PTYPE_UNUSED_ENTRY(194),
	HNS3_RX_PTYPE_UNUSED_ENTRY(195),
	HNS3_RX_PTYPE_UNUSED_ENTRY(196),
	HNS3_RX_PTYPE_UNUSED_ENTRY(197),
	HNS3_RX_PTYPE_UNUSED_ENTRY(198),
	HNS3_RX_PTYPE_UNUSED_ENTRY(199),
	HNS3_RX_PTYPE_UNUSED_ENTRY(200),
	HNS3_RX_PTYPE_UNUSED_ENTRY(201),
	HNS3_RX_PTYPE_UNUSED_ENTRY(202),
	HNS3_RX_PTYPE_UNUSED_ENTRY(203),
	HNS3_RX_PTYPE_UNUSED_ENTRY(204),
	HNS3_RX_PTYPE_UNUSED_ENTRY(205),
	HNS3_RX_PTYPE_UNUSED_ENTRY(206),
	HNS3_RX_PTYPE_UNUSED_ENTRY(207),
	HNS3_RX_PTYPE_UNUSED_ENTRY(208),
	HNS3_RX_PTYPE_UNUSED_ENTRY(209),
	HNS3_RX_PTYPE_UNUSED_ENTRY(210),
	HNS3_RX_PTYPE_UNUSED_ENTRY(211),
	HNS3_RX_PTYPE_UNUSED_ENTRY(212),
	HNS3_RX_PTYPE_UNUSED_ENTRY(213),
	HNS3_RX_PTYPE_UNUSED_ENTRY(214),
	HNS3_RX_PTYPE_UNUSED_ENTRY(215),
	HNS3_RX_PTYPE_UNUSED_ENTRY(216),
	HNS3_RX_PTYPE_UNUSED_ENTRY(217),
	HNS3_RX_PTYPE_UNUSED_ENTRY(218),
	HNS3_RX_PTYPE_UNUSED_ENTRY(219),
	HNS3_RX_PTYPE_UNUSED_ENTRY(220),
	HNS3_RX_PTYPE_UNUSED_ENTRY(221),
	HNS3_RX_PTYPE_UNUSED_ENTRY(222),
	HNS3_RX_PTYPE_UNUSED_ENTRY(223),
	HNS3_RX_PTYPE_UNUSED_ENTRY(224),
	HNS3_RX_PTYPE_UNUSED_ENTRY(225),
	HNS3_RX_PTYPE_UNUSED_ENTRY(226),
	HNS3_RX_PTYPE_UNUSED_ENTRY(227),
	HNS3_RX_PTYPE_UNUSED_ENTRY(228),
	HNS3_RX_PTYPE_UNUSED_ENTRY(229),
	HNS3_RX_PTYPE_UNUSED_ENTRY(230),
	HNS3_RX_PTYPE_UNUSED_ENTRY(231),
	HNS3_RX_PTYPE_UNUSED_ENTRY(232),
	HNS3_RX_PTYPE_UNUSED_ENTRY(233),
	HNS3_RX_PTYPE_UNUSED_ENTRY(234),
	HNS3_RX_PTYPE_UNUSED_ENTRY(235),
	HNS3_RX_PTYPE_UNUSED_ENTRY(236),
	HNS3_RX_PTYPE_UNUSED_ENTRY(237),
	HNS3_RX_PTYPE_UNUSED_ENTRY(238),
	HNS3_RX_PTYPE_UNUSED_ENTRY(239),
	HNS3_RX_PTYPE_UNUSED_ENTRY(240),
	HNS3_RX_PTYPE_UNUSED_ENTRY(241),
	HNS3_RX_PTYPE_UNUSED_ENTRY(242),
	HNS3_RX_PTYPE_UNUSED_ENTRY(243),
	HNS3_RX_PTYPE_UNUSED_ENTRY(244),
	HNS3_RX_PTYPE_UNUSED_ENTRY(245),
	HNS3_RX_PTYPE_UNUSED_ENTRY(246),
	HNS3_RX_PTYPE_UNUSED_ENTRY(247),
	HNS3_RX_PTYPE_UNUSED_ENTRY(248),
	HNS3_RX_PTYPE_UNUSED_ENTRY(249),
	HNS3_RX_PTYPE_UNUSED_ENTRY(250),
	HNS3_RX_PTYPE_UNUSED_ENTRY(251),
	HNS3_RX_PTYPE_UNUSED_ENTRY(252),
	HNS3_RX_PTYPE_UNUSED_ENTRY(253),
	HNS3_RX_PTYPE_UNUSED_ENTRY(254),
	HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};

#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)

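/* The ptype table is indexed directly by the 8-bit packet type reported
 * in the RX descriptor, so all 256 slots must exist; entries built with
 * HNS3_RX_PTYPE_UNUSED_ENTRY() have their valid flag cleared, which makes
 * the RX path fall back to software header parsing, and HNS3_INVALID_PTYPE
 * (== ARRAY_SIZE, i.e. 256) can never match a hardware-reported ptype.
 */
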
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);
	tqp_vector->event_cnt++;

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "TxRx", txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Rx", rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
				 "%s-%s-%s-%d", hns3_driver_name,
				 pci_name(priv->ae_handle->pdev),
				 "Tx", tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name, tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			hns3_nic_uninit_irq(priv);
			return ret;
		}

		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);
	enable_irq(tqp_vector->vector_irq);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
	cancel_work_sync(&tqp_vector->rx_group.dim.work);
	cancel_work_sync(&tqp_vector->tx_group.dim.work);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->rx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 new_val;

	if (tqp_vector->tx_group.coal.unit_1us)
		new_val = gl_value | HNS3_INT_GL_1US;
	else
		new_val = hns3_gl_usec_to_reg(gl_value);

	writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
}

void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 ql_value)
{
	writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}

static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
	struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;

	tx_coal->adapt_enable = ptx_coal->adapt_enable;
	rx_coal->adapt_enable = prx_coal->adapt_enable;

	tx_coal->int_gl = ptx_coal->int_gl;
	rx_coal->int_gl = prx_coal->int_gl;

	rx_coal->flow_level = prx_coal->flow_level;
	tx_coal->flow_level = ptx_coal->flow_level;

	/* device version V3 and above supports configuring GL in 1us
	 * units, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}

	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = ptx_coal->int_ql;
		rx_coal->int_ql = prx_coal->int_ql;
	}
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
	struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
	struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

	if (tx_coal->ql_enable)
		hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);

	if (rx_coal->ql_enable)
		hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}

static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	unsigned int queue_size = kinfo->num_tqps;
	int i, ret;

	if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc_info->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < tc_info->num_tc; i++)
			netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
					    tc_info->tqp_offset[i]);
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}
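
/* Example (hypothetical numbers): with num_tqps = 16 and num_tc = 4, each
 * TC is mapped to a contiguous block of 4 queues via netdev_set_tc_queue(),
 * while the real TX/RX queue counts stay at the full 16.
 */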

u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}
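
/* e.g. (hypothetical numbers) 16 allocated TQPs shared across 4 TCs give
 * an RSS size of 4 per TC, further clamped by the max_rss_size reported
 * through the ae_algo ops.
 */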

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(netdev->rx_cpu_rmap);
	netdev->rx_cpu_rmap = NULL;
#endif
}

static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	if (!netdev->rx_cpu_rmap) {
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
				       tqp_vector->vector_irq);
		if (ret) {
			hns3_free_rx_cpu_rmap(netdev);
			return ret;
		}
	}
#endif
	return 0;
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret) {
		set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
		while (j--)
			hns3_tqp_disable(h->kinfo.tqp[j]);

		for (j = i - 1; j >= 0; j--)
			hns3_vector_disable(&priv->tqp_vector[j]);
	}

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
		netdev_warn(netdev, "net open repeatedly!\n");
		return 0;
	}

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);

	netif_dbg(h, drv, netdev, "net open\n");

	return 0;
}

static void hns3_reset_tx_queue(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct netdev_queue *dev_queue;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring[i].queue_index);
		netdev_tx_reset_queue(dev_queue);
	}
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during reset process, because driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
	if (!hns3_nic_resetting(netdev))
		hns3_clear_all_ring(priv->ae_handle, false);

	hns3_reset_tx_queue(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	netif_dbg(h, drv, netdev, "net stop\n");

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	/* need to ignore the request to remove the device address, because
	 * we store the device address together with the other uc addresses
	 * in the function's mac filter list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC)
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	else if (netdev->flags & IFF_ALLMULTI)
		flags = HNAE3_USER_MPE;

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;

	new_flags = hns3_get_netdev_flags(netdev);

	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);

	/* When user-set promiscuous mode is enabled, vlan filtering is
	 * disabled to let all packets in.
	 */
	h->netdev_flags = new_flags;
	hns3_request_update_promisc_mode(h);
}

void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ops->request_update_promisc_mode)
		ops->request_update_promisc_mode(handle);
}

static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc, ntu;

	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_spare_update() called in tx desc cleaning process.
	 */
	ntc = smp_load_acquire(&tx_spare->last_to_clean);
	ntu = tx_spare->next_to_use;

	if (ntc > ntu)
		return ntc - ntu - 1;

	/* The free tx buffer is divided into two parts, so pick the
	 * larger one.
	 */
	return max(ntc, tx_spare->len - ntu) - 1;
}
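
/* Worked example (hypothetical values): with len = 4096, ntc = 1024 and
 * ntu = 3072, the free space is split into [3072, 4096) and [0, 1024);
 * max(1024, 4096 - 3072) - 1 = 1023 is returned, always keeping one byte
 * unused so a completely full buffer is never mistaken for an empty one.
 */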

static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (!tx_spare ||
	    tx_spare->last_to_clean == tx_spare->next_to_clean)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_tx_spare_space() called in xmit process.
	 */
	smp_store_release(&tx_spare->last_to_clean,
			  tx_spare->next_to_clean);
}

static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
				   struct sk_buff *skb,
				   u32 space)
{
	u32 len = skb->len <= ring->tx_copybreak ? skb->len :
				skb_headlen(skb);

	if (len > ring->tx_copybreak)
		return false;

	if (ALIGN(len, dma_get_cache_alignment()) > space) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_spare_full++;
		u64_stats_update_end(&ring->syncp);
		return false;
	}

	return true;
}

static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
				struct sk_buff *skb,
				u32 space)
{
	if (skb->len <= ring->tx_copybreak || !tx_sgl ||
	    (!skb_has_frag_list(skb) &&
	     skb_shinfo(skb)->nr_frags < tx_sgl))
		return false;

	if (space < HNS3_MAX_SGL_SIZE) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_spare_full++;
		u64_stats_update_end(&ring->syncp);
		return false;
	}

	return true;
}

static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
	struct hns3_tx_spare *tx_spare;
	struct page *page;
	u32 alloc_size;
	dma_addr_t dma;
	int order;

	alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
	if (!alloc_size)
		return;

	order = get_order(alloc_size);
	tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
				GFP_KERNEL);
	if (!tx_spare) {
		/* The driver still works without the tx spare buffer */
		dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
		return;
	}

	page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
				GFP_KERNEL, order);
	if (!page) {
		dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
		devm_kfree(ring_to_dev(ring), tx_spare);
		return;
	}

	dma = dma_map_page(ring_to_dev(ring), page, 0,
			   PAGE_SIZE << order, DMA_TO_DEVICE);
	if (dma_mapping_error(ring_to_dev(ring), dma)) {
		dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
		put_page(page);
		devm_kfree(ring_to_dev(ring), tx_spare);
		return;
	}

	tx_spare->dma = dma;
	tx_spare->buf = page_address(page);
	tx_spare->len = PAGE_SIZE << order;
	ring->tx_spare = tx_spare;
}

/* Use hns3_tx_spare_space() to make sure there is enough buffer
 * before calling below function to allocate tx buffer.
 */
static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
				 unsigned int size, dma_addr_t *dma,
				 u32 *cb_len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntu = tx_spare->next_to_use;

	size = ALIGN(size, dma_get_cache_alignment());
	*cb_len = size;

	/* Tx spare buffer wraps back here because the end of
	 * freed tx buffer is not enough.
	 */
	if (ntu + size > tx_spare->len) {
		*cb_len += (tx_spare->len - ntu);
		ntu = 0;
	}

	tx_spare->next_to_use = ntu + size;
	if (tx_spare->next_to_use == tx_spare->len)
		tx_spare->next_to_use = 0;

	*dma = tx_spare->dma + ntu;

	return tx_spare->buf + ntu;
}
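
/* Wrap-around example (hypothetical values): with len = 4096, ntu = 4000
 * and a 128-byte request, the 96 tail bytes cannot hold the allocation,
 * so *cb_len becomes 128 + 96 = 224, the allocation starts at offset 0,
 * and the skipped tail is reclaimed together with the buffer.
 */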

static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;

	if (len > tx_spare->next_to_use) {
		len -= tx_spare->next_to_use;
		tx_spare->next_to_use = tx_spare->len - len;
	} else {
		tx_spare->next_to_use -= len;
	}
}

static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
				     struct hns3_desc_cb *cb)
{
	struct hns3_tx_spare *tx_spare = ring->tx_spare;
	u32 ntc = tx_spare->next_to_clean;
	u32 len = cb->length;

	tx_spare->next_to_clean += len;

	if (tx_spare->next_to_clean >= tx_spare->len) {
		tx_spare->next_to_clean -= tx_spare->len;

		if (tx_spare->next_to_clean) {
			ntc = 0;
			len = tx_spare->next_to_clean;
		}
	}

	/* This tx spare buffer is only really reclaimed after calling
	 * hns3_tx_spare_update(), so it is still safe to use the info in
	 * the tx buffer to do the dma sync or sg unmapping after
	 * tx_spare->next_to_clean is moved forward.
	 */
	if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
		dma_addr_t dma = tx_spare->dma + ntc;

		dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
					DMA_TO_DEVICE);
	} else {
		struct sg_table *sgt = tx_spare->buf + ntc;

		dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
			     DMA_TO_DEVICE);
	}
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret < 0))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(l4_paylen));
	} else {
		hdr_len = (l4.tcp->doff << 2) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(l4_paylen));
	}

	*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* offload outer UDP header checksum */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
		hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	trace_hns3_tso(skb);

	return 0;
}
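
/* Byte-accounting example (hypothetical values): a GSO skb of 10000 bytes
 * with 66-byte headers and gso_segs = 7 yields
 * send_bytes = (7 - 1) * 66 + 10000 = 10396, i.e. skb->len plus the six
 * extra header copies the hardware will replicate on the wire.
 */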

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
 * packet is UDP with one of the IANA-assigned tunnel destination ports,
 * the hardware is expected to do the checksum offload, but it will not do
 * so when the UDP dest port is 4789 (VXLAN), 4790 (VXLAN-GPE) or
 * 6081 (GENEVE).
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
	struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
	union l4_hdr_info l4;

	/* On device version V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT) ||
	       l4.udp->dest == htons(4790))))
		return false;

	return true;
}

static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
				  u32 *ol_type_vlan_len_msec)
{
	u32 l2_len, l3_len, l4_len;
	unsigned char *il2_hdr;
	union l3_hdr_info l3;
	union l4_hdr_info l4;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute OL2 header size, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* compute OL3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);

	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);

	/* define outer network header type */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb_is_gso(skb))
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_CSUM);
		else
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV4_NO_CSUM);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
			       HNS3_OL3T_IPV6);
	}

	if (ol4_proto == IPPROTO_UDP)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_MAC_IN_UDP);
	else if (ol4_proto == IPPROTO_GRE)
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
			       HNS3_TUN_NVGRE);
}

1358 | static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, | |
1359 | u8 il4_proto, u32 *type_cs_vlan_tso, | |
1360 | u32 *ol_type_vlan_len_msec) | |
1361 | { | |
c264ed44 | 1362 | unsigned char *l2_hdr = skb->data; |
757cd1e4 YL |
1363 | u32 l4_proto = ol4_proto; |
1364 | union l4_hdr_info l4; | |
1365 | union l3_hdr_info l3; | |
1366 | u32 l2_len, l3_len; | |
1367 | ||
1368 | l4.hdr = skb_transport_header(skb); | |
1369 | l3.hdr = skb_network_header(skb); | |
1370 | ||
1371 | /* handle encapsulation skb */ | |
1372 | if (skb->encapsulation) { | |
1373 | /* If this is a not UDP/GRE encapsulation skb */ | |
1374 | if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { | |
76ad4f0e S |
1375 | /* drop the skb tunnel packet if hardware don't support, |
1376 | * because hardware can't calculate csum when TSO. | |
1377 | */ | |
1378 | if (skb_is_gso(skb)) | |
1379 | return -EDOM; | |
1380 | ||
1381 | /* the stack computes the IP header already, | |
1382 | * driver calculate l4 checksum when not TSO. | |
1383 | */ | |
9bb5a495 | 1384 | return skb_checksum_help(skb); |
76ad4f0e S |
1385 | } |
1386 | ||
757cd1e4 YL |
1387 | hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); |
1388 | ||
1389 | /* switch to inner header */ | |
1390 | l2_hdr = skb_inner_mac_header(skb); | |
76ad4f0e | 1391 | l3.hdr = skb_inner_network_header(skb); |
757cd1e4 | 1392 | l4.hdr = skb_inner_transport_header(skb); |
76ad4f0e S |
1393 | l4_proto = il4_proto; |
1394 | } | |
1395 | ||
1396 | if (l3.v4->version == 4) { | |
cde4ffad YL |
1397 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, |
1398 | HNS3_L3T_IPV4); | |
76ad4f0e S |
1399 | |
1400 | /* the stack computes the IP header already, the only time we | |
1401 | * need the hardware to recompute it is in the case of TSO. | |
1402 | */ | |
1403 | if (skb_is_gso(skb)) | |
cde4ffad | 1404 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); |
76ad4f0e | 1405 | } else if (l3.v6->version == 6) { |
cde4ffad YL |
1406 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, |
1407 | HNS3_L3T_IPV6); | |
76ad4f0e S |
1408 | } |
1409 | ||
757cd1e4 YL |
1410 | /* compute inner(/normal) L2 header size, defined in 2 Bytes */ |
1411 | l2_len = l3.hdr - l2_hdr; | |
1412 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); | |
1413 | ||
1414 | /* compute inner(/normal) L3 header size, defined in 4 Bytes */ | |
1415 | l3_len = l4.hdr - l3.hdr; | |
1416 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); | |
1417 | ||
1418 | /* compute inner(/normal) L4 header size, defined in 4 Bytes */ | |
76ad4f0e S |
1419 | switch (l4_proto) { |
1420 | case IPPROTO_TCP: | |
cde4ffad YL |
1421 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
1422 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, | |
1423 | HNS3_L4T_TCP); | |
757cd1e4 YL |
1424 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
1425 | l4.tcp->doff); | |
76ad4f0e S |
1426 | break; |
1427 | case IPPROTO_UDP: | |
d18e8118 YM |
1428 | if (hns3_tunnel_csum_bug(skb)) { |
1429 | int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); | |
1430 | ||
1431 | return ret ? ret : skb_checksum_help(skb); | |
1432 | } | |
3db084d2 | 1433 | |
cde4ffad YL |
1434 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
1435 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, | |
1436 | HNS3_L4T_UDP); | |
757cd1e4 YL |
1437 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
1438 | (sizeof(struct udphdr) >> 2)); | |
76ad4f0e S |
1439 | break; |
1440 | case IPPROTO_SCTP: | |
cde4ffad YL |
1441 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); |
1442 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, | |
1443 | HNS3_L4T_SCTP); | |
757cd1e4 YL |
1444 | hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, |
1445 | (sizeof(struct sctphdr) >> 2)); | |
76ad4f0e S |
1446 | break; |
1447 | default: | |
1448 | /* drop the tunnel packet if the hardware doesn't support it, |
1449 | * because the hardware can't calculate the csum when doing TSO. |
1450 | */ | |
1451 | if (skb_is_gso(skb)) | |
1452 | return -EDOM; | |
1453 | ||
1454 | /* the stack computes the IP header already, so the driver |
1455 | * calculates the l4 checksum when not doing TSO. |
1456 | */ | |
9bb5a495 | 1457 | return skb_checksum_help(skb); |
76ad4f0e S |
1458 | } |
1459 | ||
1460 | return 0; | |
1461 | } | |
1462 | ||
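
For concreteness, the sketch below works the length-field encoding through for a plain Ethernet + IPv4 + TCP frame. It is an illustration only, not driver code; the 2-byte and 4-byte units come from the shifts in hns3_set_l2l3l4() above, while the header sizes are just the common no-options values.

#include <assert.h>

int main(void)
{
        unsigned int l2_len = 14;  /* plain Ethernet header */
        unsigned int l3_len = 20;  /* IPv4 header without options */
        unsigned int tcp_doff = 5; /* TCP doff is already in 4-byte words */

        assert((l2_len >> 1) == 7); /* value stored via HNS3_TXD_L2LEN_S */
        assert((l3_len >> 2) == 5); /* value stored via HNS3_TXD_L3LEN_S */
        assert(tcp_doff == 5);      /* value stored via HNS3_TXD_L4LEN_S */
        return 0;
}
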
eb977d99 YL |
1463 | static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, |
1464 | struct sk_buff *skb) | |
9699cffe | 1465 | { |
44e626f7 | 1466 | struct hnae3_handle *handle = tx_ring->tqp->handle; |
592b0179 | 1467 | struct hnae3_ae_dev *ae_dev; |
eb977d99 YL |
1468 | struct vlan_ethhdr *vhdr; |
1469 | int rc; | |
1470 | ||
1471 | if (!(skb->protocol == htons(ETH_P_8021Q) || | |
1472 | skb_vlan_tag_present(skb))) | |
1473 | return 0; | |
44e626f7 | 1474 | |
592b0179 GL |
1475 | /* Due to a HW limitation on HNAE3_DEVICE_VERSION_V2, if port based |
1476 | * VLAN insertion is enabled, only one VLAN header is allowed in the |
1477 | * skb, otherwise it will cause a RAS error. |
44e626f7 | 1478 | */ |
592b0179 | 1479 | ae_dev = pci_get_drvdata(handle->pdev); |
44e626f7 | 1480 | if (unlikely(skb_vlan_tagged_multi(skb) && |
592b0179 | 1481 | ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && |
44e626f7 JS |
1482 | handle->port_base_vlan_state == |
1483 | HNAE3_PORT_BASE_VLAN_ENABLE)) | |
1484 | return -EINVAL; | |
1485 | ||
9699cffe | 1486 | if (skb->protocol == htons(ETH_P_8021Q) && |
eb977d99 | 1487 | !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { |
9699cffe PL |
1488 | /* When HW VLAN acceleration is turned off, and the stack |
1489 | * sets the protocol to 802.1q, the driver just needs to |
1490 | * set the protocol to the encapsulated ethertype. | |
1491 | */ | |
1492 | skb->protocol = vlan_get_protocol(skb); | |
1493 | return 0; | |
1494 | } | |
1495 | ||
1496 | if (skb_vlan_tag_present(skb)) { | |
9699cffe PL |
1497 | /* Based on the hw strategy, use out_vtag in the two-layer tag case, |
1498 | * and use inner_vtag in the single tag case. |
1499 | */ | |
eb977d99 YL |
1500 | if (skb->protocol == htons(ETH_P_8021Q) && |
1501 | handle->port_base_vlan_state == | |
1502 | HNAE3_PORT_BASE_VLAN_DISABLE) | |
1503 | rc = HNS3_OUTER_VLAN_TAG; | |
1504 | else | |
1505 | rc = HNS3_INNER_VLAN_TAG; | |
1506 | ||
1507 | skb->protocol = vlan_get_protocol(skb); | |
1508 | return rc; | |
9699cffe PL |
1509 | } |
1510 | ||
eb977d99 YL |
1511 | rc = skb_cow_head(skb, 0); |
1512 | if (unlikely(rc < 0)) | |
1513 | return rc; | |
1514 | ||
1515 | vhdr = (struct vlan_ethhdr *)skb->data; | |
1516 | vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) | |
1517 | & VLAN_PRIO_MASK); | |
1518 | ||
9699cffe PL |
1519 | skb->protocol = vlan_get_protocol(skb); |
1520 | return 0; | |
1521 | } | |
1522 | ||
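
The TCI arithmetic in hns3_handle_vtags() packs skb->priority into the top three PCP bits of the tag. A minimal sketch, using the standard <linux/if_vlan.h> constants and illustrative values:

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13     /* PCP lives in bits 15:13 of the TCI */
#define VLAN_PRIO_MASK  0xe000

int main(void)
{
        uint16_t vid = 100, prio = 5; /* illustrative VLAN ID and priority */
        uint16_t tci = vid | ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);

        assert(tci == 0xa064); /* PCP 5 (0xa000) | VID 100 (0x064) */
        return 0;
}
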
66d52f3b HT |
1523 | /* check if the hardware is capable of checksum offloading */ |
1524 | static bool hns3_check_hw_tx_csum(struct sk_buff *skb) | |
1525 | { | |
1526 | struct hns3_nic_priv *priv = netdev_priv(skb->dev); | |
1527 | ||
1528 | /* Note that, for backward compatibility of the TX descriptor, |
1529 | * HW checksum of non-IP packets and GSO packets is handled in a |
1530 | * different place in the following code. |
1531 | */ | |
b9046e88 | 1532 | if (skb_csum_is_sctp(skb) || skb_is_gso(skb) || |
66d52f3b HT |
1533 | !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) |
1534 | return false; | |
1535 | ||
1536 | return true; | |
1537 | } | |
1538 | ||
eb977d99 | 1539 | static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, |
811c0830 YL |
1540 | struct sk_buff *skb, struct hns3_desc *desc, |
1541 | struct hns3_desc_cb *desc_cb) | |
eb977d99 YL |
1542 | { |
1543 | u32 ol_type_vlan_len_msec = 0; | |
3e281621 | 1544 | u32 paylen_ol4cs = skb->len; |
eb977d99 | 1545 | u32 type_cs_vlan_tso = 0; |
66d52f3b | 1546 | u16 mss_hw_csum = 0; |
eb977d99 YL |
1547 | u16 inner_vtag = 0; |
1548 | u16 out_vtag = 0; | |
eb977d99 YL |
1549 | int ret; |
1550 | ||
1551 | ret = hns3_handle_vtags(ring, skb); | |
1552 | if (unlikely(ret < 0)) { | |
b20d7fe5 YL |
1553 | u64_stats_update_begin(&ring->syncp); |
1554 | ring->stats.tx_vlan_err++; | |
1555 | u64_stats_update_end(&ring->syncp); | |
eb977d99 YL |
1556 | return ret; |
1557 | } else if (ret == HNS3_INNER_VLAN_TAG) { | |
1558 | inner_vtag = skb_vlan_tag_get(skb); | |
1559 | inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & | |
1560 | VLAN_PRIO_MASK; | |
1561 | hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); | |
1562 | } else if (ret == HNS3_OUTER_VLAN_TAG) { | |
1563 | out_vtag = skb_vlan_tag_get(skb); | |
1564 | out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & | |
1565 | VLAN_PRIO_MASK; | |
1566 | hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, | |
1567 | 1); | |
1568 | } | |
1569 | ||
811c0830 YL |
1570 | desc_cb->send_bytes = skb->len; |
1571 | ||
eb977d99 YL |
1572 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1573 | u8 ol4_proto, il4_proto; | |
1574 | ||
66d52f3b HT |
1575 | if (hns3_check_hw_tx_csum(skb)) { |
1576 | /* set checksum start and offset, defined in 2 Bytes */ | |
1577 | hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, | |
1578 | skb_checksum_start_offset(skb) >> 1); | |
1579 | hns3_set_field(ol_type_vlan_len_msec, | |
1580 | HNS3_TXD_CSUM_OFFSET_S, | |
1581 | skb->csum_offset >> 1); | |
1582 | mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); | |
1583 | goto out_hw_tx_csum; | |
1584 | } | |
1585 | ||
eb977d99 YL |
1586 | skb_reset_mac_len(skb); |
1587 | ||
1588 | ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); | |
8ae10cfb | 1589 | if (unlikely(ret < 0)) { |
b20d7fe5 YL |
1590 | u64_stats_update_begin(&ring->syncp); |
1591 | ring->stats.tx_l4_proto_err++; | |
1592 | u64_stats_update_end(&ring->syncp); | |
eb977d99 | 1593 | return ret; |
b20d7fe5 | 1594 | } |
eb977d99 YL |
1595 | |
1596 | ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, | |
1597 | &type_cs_vlan_tso, | |
1598 | &ol_type_vlan_len_msec); | |
8ae10cfb | 1599 | if (unlikely(ret < 0)) { |
b20d7fe5 YL |
1600 | u64_stats_update_begin(&ring->syncp); |
1601 | ring->stats.tx_l2l3l4_err++; | |
1602 | u64_stats_update_end(&ring->syncp); | |
eb977d99 | 1603 | return ret; |
b20d7fe5 | 1604 | } |
eb977d99 | 1605 | |
3e281621 | 1606 | ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, |
811c0830 | 1607 | &type_cs_vlan_tso, &desc_cb->send_bytes); |
8ae10cfb | 1608 | if (unlikely(ret < 0)) { |
b20d7fe5 YL |
1609 | u64_stats_update_begin(&ring->syncp); |
1610 | ring->stats.tx_tso_err++; | |
1611 | u64_stats_update_end(&ring->syncp); | |
eb977d99 | 1612 | return ret; |
b20d7fe5 | 1613 | } |
eb977d99 YL |
1614 | } |
1615 | ||
66d52f3b | 1616 | out_hw_tx_csum: |
eb977d99 YL |
1617 | /* Set txbd */ |
1618 | desc->tx.ol_type_vlan_len_msec = | |
1619 | cpu_to_le32(ol_type_vlan_len_msec); | |
1620 | desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); | |
3e281621 | 1621 | desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); |
66d52f3b | 1622 | desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); |
eb977d99 YL |
1623 | desc->tx.vlan_tag = cpu_to_le16(inner_vtag); |
1624 | desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); | |
1625 | ||
1626 | return 0; | |
1627 | } | |
1628 | ||
8677d78c YL |
1629 | static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, |
1630 | unsigned int size) | |
76ad4f0e | 1631 | { |
8ae10cfb YL |
1632 | #define HNS3_LIKELY_BD_NUM 1 |
1633 | ||
76ad4f0e | 1634 | struct hns3_desc *desc = &ring->desc[ring->next_to_use]; |
1e8a7977 | 1635 | unsigned int frag_buf_num; |
47e7b13b | 1636 | int k, sizeoflast; |
bcdb12b7 | 1637 | |
ceca4a5e | 1638 | if (likely(size <= HNS3_MAX_BD_SIZE)) { |
ceca4a5e YL |
1639 | desc->addr = cpu_to_le64(dma); |
1640 | desc->tx.send_size = cpu_to_le16(size); | |
ceca4a5e | 1641 | desc->tx.bdtp_fe_sc_vld_ra_ri = |
8ae10cfb | 1642 | cpu_to_le16(BIT(HNS3_TXD_VLD_B)); |
ceca4a5e | 1643 | |
698a8954 | 1644 | trace_hns3_tx_desc(ring, ring->next_to_use); |
ceca4a5e | 1645 | ring_ptr_move_fw(ring, next_to_use); |
8ae10cfb | 1646 | return HNS3_LIKELY_BD_NUM; |
ceca4a5e YL |
1647 | } |
1648 | ||
5f543a54 | 1649 | frag_buf_num = hns3_tx_bd_count(size); |
48ae74c9 | 1650 | sizeoflast = size % HNS3_MAX_BD_SIZE; |
1e8a7977 FL |
1651 | sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; |
1652 | ||
1653 | /* When the frag size exceeds the hardware limit, split this frag */ |
1654 | for (k = 0; k < frag_buf_num; k++) { | |
1e8a7977 FL |
1655 | /* now, fill the descriptor */ |
1656 | desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); | |
bcdb12b7 | 1657 | desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? |
9b2f3477 | 1658 | (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); |
1e8a7977 | 1659 | desc->tx.bdtp_fe_sc_vld_ra_ri = |
8ae10cfb | 1660 | cpu_to_le16(BIT(HNS3_TXD_VLD_B)); |
1e8a7977 | 1661 | |
698a8954 | 1662 | trace_hns3_tx_desc(ring, ring->next_to_use); |
9b2f3477 | 1663 | /* move ring pointer to next */ |
1e8a7977 FL |
1664 | ring_ptr_move_fw(ring, next_to_use); |
1665 | ||
1e8a7977 FL |
1666 | desc = &ring->desc[ring->next_to_use]; |
1667 | } | |
76ad4f0e | 1668 | |
8ae10cfb | 1669 | return frag_buf_num; |
76ad4f0e S |
1670 | } |
1671 | ||
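
The splitting arithmetic in hns3_fill_desc() is easy to check by hand. A small standalone sketch, assuming the usual HNS3_MAX_BD_SIZE of 65535 bytes (value taken from hns3_enet.h, stated here as an assumption):

#include <assert.h>

#define HNS3_MAX_BD_SIZE 65535U /* assumed per-BD hardware limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int size = 100000; /* illustrative jumbo fragment */
        unsigned int bds = DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE);
        unsigned int last = size % HNS3_MAX_BD_SIZE ?
                            size % HNS3_MAX_BD_SIZE : HNS3_MAX_BD_SIZE;

        assert(bds == 2);      /* two descriptors cover the buffer */
        assert(last == 34465); /* tail descriptor carries the remainder */
        return 0;
}
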
8677d78c YL |
1672 | static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, |
1673 | unsigned int type) | |
1674 | { | |
1675 | struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; | |
1676 | struct device *dev = ring_to_dev(ring); | |
1677 | unsigned int size; | |
1678 | dma_addr_t dma; | |
1679 | ||
1680 | if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) { | |
1681 | struct sk_buff *skb = (struct sk_buff *)priv; | |
1682 | ||
1683 | size = skb_headlen(skb); | |
1684 | if (!size) | |
1685 | return 0; | |
1686 | ||
1687 | dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); | |
907676b1 YL |
1688 | } else if (type & DESC_TYPE_BOUNCE_HEAD) { |
1689 | /* Head data has been filled in hns3_handle_tx_bounce(), | |
1690 | * just return 0 here. | |
1691 | */ | |
1692 | return 0; | |
8677d78c YL |
1693 | } else { |
1694 | skb_frag_t *frag = (skb_frag_t *)priv; | |
1695 | ||
1696 | size = skb_frag_size(frag); | |
1697 | if (!size) | |
1698 | return 0; | |
1699 | ||
1700 | dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); | |
1701 | } | |
1702 | ||
1703 | if (unlikely(dma_mapping_error(dev, dma))) { | |
1704 | u64_stats_update_begin(&ring->syncp); | |
1705 | ring->stats.sw_err_cnt++; | |
1706 | u64_stats_update_end(&ring->syncp); | |
1707 | return -ENOMEM; | |
1708 | } | |
1709 | ||
1710 | desc_cb->priv = priv; | |
1711 | desc_cb->length = size; | |
1712 | desc_cb->dma = dma; | |
1713 | desc_cb->type = type; | |
1714 | ||
1715 | return hns3_fill_desc(ring, dma, size); | |
1716 | } | |
1717 | ||
8ae10cfb YL |
1718 | static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, |
1719 | unsigned int bd_num) | |
76ad4f0e | 1720 | { |
8ae10cfb | 1721 | unsigned int size; |
42611b70 | 1722 | int i; |
76ad4f0e | 1723 | |
8ae10cfb YL |
1724 | size = skb_headlen(skb); |
1725 | while (size > HNS3_MAX_BD_SIZE) { | |
1726 | bd_size[bd_num++] = HNS3_MAX_BD_SIZE; | |
1727 | size -= HNS3_MAX_BD_SIZE; | |
1728 | ||
1729 | if (bd_num > HNS3_MAX_TSO_BD_NUM) | |
1730 | return bd_num; | |
1731 | } | |
76ad4f0e | 1732 | |
8ae10cfb YL |
1733 | if (size) { |
1734 | bd_size[bd_num++] = size; | |
1735 | if (bd_num > HNS3_MAX_TSO_BD_NUM) | |
1736 | return bd_num; | |
1737 | } | |
76ad4f0e | 1738 | |
3d5f3741 | 1739 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
d7840976 | 1740 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
8ae10cfb YL |
1741 | size = skb_frag_size(frag); |
1742 | if (!size) | |
1743 | continue; | |
1744 | ||
1745 | while (size > HNS3_MAX_BD_SIZE) { | |
1746 | bd_size[bd_num++] = HNS3_MAX_BD_SIZE; | |
1747 | size -= HNS3_MAX_BD_SIZE; | |
1748 | ||
1749 | if (bd_num > HNS3_MAX_TSO_BD_NUM) | |
1750 | return bd_num; | |
1751 | } | |
1752 | ||
1753 | bd_size[bd_num++] = size; | |
1754 | if (bd_num > HNS3_MAX_TSO_BD_NUM) | |
1755 | return bd_num; | |
1756 | } | |
1757 | ||
1758 | return bd_num; | |
1759 | } | |
1760 | ||
fd665b3d | 1761 | static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size, |
d5d5e019 YL |
1762 | u8 max_non_tso_bd_num, unsigned int bd_num, |
1763 | unsigned int recursion_level) | |
8ae10cfb | 1764 | { |
d5d5e019 YL |
1765 | #define HNS3_MAX_RECURSION_LEVEL 24 |
1766 | ||
8ae10cfb | 1767 | struct sk_buff *frag_skb; |
8ae10cfb YL |
1768 | |
1769 | /* If the total len is within the max bd limit */ | |
d5d5e019 YL |
1770 | if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && |
1771 | !skb_has_frag_list(skb) && | |
fd665b3d | 1772 | skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) |
8ae10cfb YL |
1773 | return skb_shinfo(skb)->nr_frags + 1U; |
1774 | ||
d5d5e019 YL |
1775 | if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL)) |
1776 | return UINT_MAX; | |
8ae10cfb YL |
1777 | |
1778 | bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); | |
8ae10cfb YL |
1779 | if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) |
1780 | return bd_num; | |
1781 | ||
1782 | skb_walk_frags(skb, frag_skb) { | |
d5d5e019 YL |
1783 | bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num, |
1784 | bd_num, recursion_level + 1); | |
8ae10cfb YL |
1785 | if (bd_num > HNS3_MAX_TSO_BD_NUM) |
1786 | return bd_num; | |
3d5f3741 | 1787 | } |
76ad4f0e | 1788 | |
3d5f3741 | 1789 | return bd_num; |
76ad4f0e S |
1790 | } |
1791 | ||
db4970aa YL |
1792 | static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) |
1793 | { | |
1794 | if (!skb->encapsulation) | |
1795 | return skb_transport_offset(skb) + tcp_hdrlen(skb); | |
1796 | ||
1797 | return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); | |
1798 | } | |
1799 | ||
fd665b3d HT |
1800 | /* HW needs every window of max_non_tso_bd_num continuous buffers to carry |
1801 | * more data than the MSS. We simplify that by ensuring skb_headlen plus |
1802 | * the first max_non_tso_bd_num - 1 continuous frags exceed the gso header |
1803 | * len plus the mss, and every following max_non_tso_bd_num - 1 continuous |
1804 | * frags exceed the MSS, except the last max_non_tso_bd_num - 1 frags. |
db4970aa | 1805 | */ |
8ae10cfb | 1806 | static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, |
fd665b3d | 1807 | unsigned int bd_num, u8 max_non_tso_bd_num) |
db4970aa | 1808 | { |
db4970aa YL |
1809 | unsigned int tot_len = 0; |
1810 | int i; | |
1811 | ||
fd665b3d | 1812 | for (i = 0; i < max_non_tso_bd_num - 1U; i++) |
8ae10cfb | 1813 | tot_len += bd_size[i]; |
db4970aa | 1814 | |
fd665b3d HT |
1815 | /* ensure the first max_non_tso_bd_num frags are greater than |
1816 | * mss + header | |
1817 | */ | |
1818 | if (tot_len + bd_size[max_non_tso_bd_num - 1U] < | |
8ae10cfb | 1819 | skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) |
db4970aa YL |
1820 | return true; |
1821 | ||
fd665b3d HT |
1822 | /* ensure every window of max_non_tso_bd_num - 1 continuous buffers |
1823 | * is greater than the mss, except the last one. |
8ae10cfb | 1824 | */ |
fd665b3d | 1825 | for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { |
8ae10cfb | 1826 | tot_len -= bd_size[i]; |
fd665b3d | 1827 | tot_len += bd_size[i + max_non_tso_bd_num - 1U]; |
db4970aa YL |
1828 | |
1829 | if (tot_len < skb_shinfo(skb)->gso_size) | |
1830 | return true; | |
1831 | } | |
1832 | ||
1833 | return false; | |
1834 | } | |
1835 | ||
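
The sliding-window check above is subtle, so here is a hedged userspace rendering of it. The function name and the max_bd value of 8 (the driver's usual non-TSO BD limit) are assumptions for illustration; the loop structure mirrors hns3_skb_need_linearized().

#include <stdbool.h>
#include <stdio.h>

static bool need_linearize(const unsigned int *bd_size, unsigned int bd_num,
                           unsigned int mss, unsigned int hdr_len,
                           unsigned int max_bd)
{
        unsigned int tot = 0, i;

        for (i = 0; i < max_bd - 1; i++) /* head window minus last slot */
                tot += bd_size[i];

        if (tot + bd_size[max_bd - 1] < mss + hdr_len)
                return true; /* first window too small for header + MSS */

        for (i = 0; i < bd_num - max_bd; i++) {
                tot -= bd_size[i];              /* slide the window */
                tot += bd_size[i + max_bd - 1];
                if (tot < mss)
                        return true; /* some window smaller than one MSS */
        }
        return false;
}

int main(void)
{
        unsigned int bd[10] = { 500, 500, 500, 500, 500,
                                500, 500, 500, 500, 500 };

        /* every 8-BD window carries 4000 bytes, ample for a 1448-byte MSS */
        printf("%d\n", need_linearize(bd, 10, 1448, 66, 8));
        return 0;
}
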
698a8954 YL |
1836 | void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size) |
1837 | { | |
9d8d5a36 | 1838 | int i; |
698a8954 YL |
1839 | |
1840 | for (i = 0; i < MAX_SKB_FRAGS; i++) | |
1841 | size[i] = skb_frag_size(&shinfo->frags[i]); | |
1842 | } | |
1843 | ||
d5d5e019 YL |
1844 | static int hns3_skb_linearize(struct hns3_enet_ring *ring, |
1845 | struct sk_buff *skb, | |
d5d5e019 YL |
1846 | unsigned int bd_num) |
1847 | { | |
1848 | /* 'bd_num == UINT_MAX' means the skb's fraglist has a |
1849 | * recursion level of over HNS3_MAX_RECURSION_LEVEL. | |
1850 | */ | |
1851 | if (bd_num == UINT_MAX) { | |
1852 | u64_stats_update_begin(&ring->syncp); | |
1853 | ring->stats.over_max_recursion++; | |
1854 | u64_stats_update_end(&ring->syncp); | |
1855 | return -ENOMEM; | |
1856 | } | |
1857 | ||
1858 | /* The skb->len has exceeded the hw limitation, so linearization |
1859 | * will not help. | |
1860 | */ | |
1861 | if (skb->len > HNS3_MAX_TSO_SIZE || | |
adfb7b49 | 1862 | (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { |
d5d5e019 YL |
1863 | u64_stats_update_begin(&ring->syncp); |
1864 | ring->stats.hw_limitation++; | |
1865 | u64_stats_update_end(&ring->syncp); | |
1866 | return -ENOMEM; | |
1867 | } | |
1868 | ||
1869 | if (__skb_linearize(skb)) { | |
1870 | u64_stats_update_begin(&ring->syncp); | |
1871 | ring->stats.sw_err_cnt++; | |
1872 | u64_stats_update_end(&ring->syncp); | |
1873 | return -ENOMEM; | |
1874 | } | |
1875 | ||
1876 | return 0; | |
1877 | } | |
1878 | ||
3d5f3741 | 1879 | static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, |
2a597eff | 1880 | struct net_device *netdev, |
d1a37ded | 1881 | struct sk_buff *skb) |
76ad4f0e | 1882 | { |
2a597eff | 1883 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
fd665b3d | 1884 | u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; |
8ae10cfb | 1885 | unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; |
42611b70 | 1886 | unsigned int bd_num; |
76ad4f0e | 1887 | |
d5d5e019 | 1888 | bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0); |
fd665b3d | 1889 | if (unlikely(bd_num > max_non_tso_bd_num)) { |
8ae10cfb | 1890 | if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && |
fd665b3d HT |
1891 | !hns3_skb_need_linearized(skb, bd_size, bd_num, |
1892 | max_non_tso_bd_num)) { | |
6ad595bc | 1893 | trace_hns3_over_max_bd(skb); |
db4970aa | 1894 | goto out; |
698a8954 | 1895 | } |
db4970aa | 1896 | |
adfb7b49 | 1897 | if (hns3_skb_linearize(ring, skb, bd_num)) |
51e8439f | 1898 | return -ENOMEM; |
3d5f3741 | 1899 | |
d1a37ded | 1900 | bd_num = hns3_tx_bd_count(skb->len); |
42611b70 | 1901 | |
3d5f3741 YL |
1902 | u64_stats_update_begin(&ring->syncp); |
1903 | ring->stats.tx_copy++; | |
1904 | u64_stats_update_end(&ring->syncp); | |
51e8439f PL |
1905 | } |
1906 | ||
db4970aa | 1907 | out: |
2a597eff YL |
1908 | if (likely(ring_space(ring) >= bd_num)) |
1909 | return bd_num; | |
76ad4f0e | 1910 | |
2a597eff YL |
1911 | netif_stop_subqueue(netdev, ring->queue_index); |
1912 | smp_mb(); /* Memory barrier before checking ring_space */ | |
1913 | ||
1914 | /* Start the queue in case hns3_clean_tx_ring has just made room |
1915 | * available but has not yet seen the stopped state set by |
1916 | * netif_stop_subqueue above. |
1917 | */ | |
1918 | if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) && | |
1919 | !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { | |
1920 | netif_start_subqueue(netdev, ring->queue_index); | |
1921 | return bd_num; | |
1922 | } | |
1923 | ||
d5d5e019 YL |
1924 | u64_stats_update_begin(&ring->syncp); |
1925 | ring->stats.tx_busy++; | |
1926 | u64_stats_update_end(&ring->syncp); | |
1927 | ||
2a597eff | 1928 | return -EBUSY; |
76ad4f0e S |
1929 | } |
1930 | ||
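
The stop-then-recheck sequence above is a standard lockless producer/consumer handshake: stop the queue, issue a full barrier, and re-read the free space so that a completion racing in between cannot leave the queue stopped forever. A toy C11 model of the producer side (all names hypothetical, memory_order_seq_cst standing in for smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

struct toy_ring {
        atomic_int space;    /* free descriptors */
        atomic_bool stopped; /* mirrors the subqueue stopped state */
};

static bool toy_maybe_stop(struct toy_ring *r, int need)
{
        if (atomic_load(&r->space) >= need)
                return false;                      /* enough room */

        atomic_store(&r->stopped, true);           /* netif_stop_subqueue() */
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */

        if (atomic_load(&r->space) >= need) {      /* room appeared meanwhile */
                atomic_store(&r->stopped, false);  /* netif_start_subqueue() */
                return false;
        }
        return true;                               /* genuinely out of room */
}

int main(void)
{
        struct toy_ring r = { 2, false };

        return toy_maybe_stop(&r, 4) ? 0 : 1; /* stops: only 2 slots free */
}
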
ba3f808f | 1931 | static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) |
76ad4f0e S |
1932 | { |
1933 | struct device *dev = ring_to_dev(ring); | |
1934 | unsigned int i; | |
1935 | ||
1936 | for (i = 0; i < ring->desc_num; i++) { | |
8ceca59f | 1937 | struct hns3_desc *desc = &ring->desc[ring->next_to_use]; |
26f1ccdf | 1938 | struct hns3_desc_cb *desc_cb; |
8ceca59f YL |
1939 | |
1940 | memset(desc, 0, sizeof(*desc)); | |
1941 | ||
76ad4f0e S |
1942 | /* check if this is where we started */ |
1943 | if (ring->next_to_use == next_to_use_orig) | |
1944 | break; | |
1945 | ||
aa9d22dd YL |
1946 | /* rollback one */ |
1947 | ring_ptr_move_bw(ring, next_to_use); | |
1948 | ||
26f1ccdf YL |
1949 | desc_cb = &ring->desc_cb[ring->next_to_use]; |
1950 | ||
1951 | if (!desc_cb->dma) | |
8ceca59f YL |
1952 | continue; |
1953 | ||
76ad4f0e | 1954 | /* unmap the descriptor dma address */ |
26f1ccdf YL |
1955 | if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) |
1956 | dma_unmap_single(dev, desc_cb->dma, desc_cb->length, | |
1957 | DMA_TO_DEVICE); | |
907676b1 YL |
1958 | else if (desc_cb->type & |
1959 | (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) | |
1960 | hns3_tx_spare_rollback(ring, desc_cb->length); | |
26f1ccdf YL |
1961 | else if (desc_cb->length) |
1962 | dma_unmap_page(dev, desc_cb->dma, desc_cb->length, | |
76ad4f0e S |
1963 | DMA_TO_DEVICE); |
1964 | ||
26f1ccdf YL |
1965 | desc_cb->length = 0; |
1966 | desc_cb->dma = 0; | |
1967 | desc_cb->type = DESC_TYPE_UNKNOWN; | |
76ad4f0e S |
1968 | } |
1969 | } | |
1970 | ||
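
hns3_clear_desc() above rolls the ring pointer backwards with wraparound. A one-line model of what ring_ptr_move_bw() presumably does (the macro body is not shown in this file, so treat it as an assumption), with an illustrative ring size:

#include <assert.h>

#define RING_SIZE 512U /* illustrative descriptor count */

static unsigned int move_bw(unsigned int ntu)
{
        return (ntu + RING_SIZE - 1) % RING_SIZE; /* step back, wrapping */
}

int main(void)
{
        assert(move_bw(5) == 4);
        assert(move_bw(0) == RING_SIZE - 1); /* wraps to the last entry */
        return 0;
}
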
8ae10cfb | 1971 | static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, |
26f1ccdf | 1972 | struct sk_buff *skb, unsigned int type) |
8ae10cfb | 1973 | { |
d5d5e019 | 1974 | struct sk_buff *frag_skb; |
8ae10cfb YL |
1975 | int i, ret, bd_num = 0; |
1976 | ||
8677d78c YL |
1977 | ret = hns3_map_and_fill_desc(ring, skb, type); |
1978 | if (unlikely(ret < 0)) | |
1979 | return ret; | |
8ae10cfb | 1980 | |
8677d78c | 1981 | bd_num += ret; |
8ae10cfb YL |
1982 | |
1983 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
1984 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1985 | ||
8677d78c | 1986 | ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE); |
8ae10cfb YL |
1987 | if (unlikely(ret < 0)) |
1988 | return ret; | |
1989 | ||
1990 | bd_num += ret; | |
1991 | } | |
1992 | ||
d5d5e019 YL |
1993 | skb_walk_frags(skb, frag_skb) { |
1994 | ret = hns3_fill_skb_to_desc(ring, frag_skb, | |
1995 | DESC_TYPE_FRAGLIST_SKB); | |
1996 | if (unlikely(ret < 0)) | |
1997 | return ret; | |
1998 | ||
1999 | bd_num += ret; | |
2000 | } | |
2001 | ||
8ae10cfb YL |
2002 | return bd_num; |
2003 | } | |
2004 | ||
f6061a05 YL |
2005 | static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, |
2006 | bool doorbell) | |
2007 | { | |
2008 | ring->pending_buf += num; | |
2009 | ||
2010 | if (!doorbell) { | |
2011 | u64_stats_update_begin(&ring->syncp); | |
2012 | ring->stats.tx_more++; | |
2013 | u64_stats_update_end(&ring->syncp); | |
2014 | return; | |
2015 | } | |
2016 | ||
2017 | if (!ring->pending_buf) | |
2018 | return; | |
2019 | ||
48ee56fd YL |
2020 | writel(ring->pending_buf, |
2021 | ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); | |
f6061a05 | 2022 | ring->pending_buf = 0; |
20d06ca2 | 2023 | WRITE_ONCE(ring->last_to_use, ring->next_to_use); |
f6061a05 YL |
2024 | } |
2025 | ||
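
hns3_tx_doorbell() batches BDs across back-to-back skbs and writes the tail register once, when the stack stops signalling xmit_more. A toy model of that accounting (names hypothetical):

#include <assert.h>

struct toy { int pending; int doorbells; };

static void toy_doorbell(struct toy *t, int num, int ring_now)
{
        t->pending += num;
        if (!ring_now || !t->pending)
                return; /* keep batching (the tx_more case) */
        t->doorbells++; /* the single writel() to the tail register */
        t->pending = 0;
}

int main(void)
{
        struct toy t = { 0, 0 };

        toy_doorbell(&t, 2, 0); /* xmit_more set: defer */
        toy_doorbell(&t, 3, 0); /* xmit_more set: defer */
        toy_doorbell(&t, 1, 1); /* last skb: one write covers 6 BDs */
        assert(t.doorbells == 1 && t.pending == 0);
        return 0;
}
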
0bf5eb78 HT |
2026 | static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb, |
2027 | struct hns3_desc *desc) | |
2028 | { | |
2029 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
2030 | ||
2031 | if (!(h->ae_algo->ops->set_tx_hwts_info && | |
2032 | h->ae_algo->ops->set_tx_hwts_info(h, skb))) | |
2033 | return; | |
2034 | ||
2035 | desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); | |
2036 | } | |
2037 | ||
907676b1 YL |
2038 | static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, |
2039 | struct sk_buff *skb) | |
2040 | { | |
2041 | struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; | |
2042 | unsigned int type = DESC_TYPE_BOUNCE_HEAD; | |
2043 | unsigned int size = skb_headlen(skb); | |
2044 | dma_addr_t dma; | |
2045 | int bd_num = 0; | |
2046 | u32 cb_len; | |
2047 | void *buf; | |
2048 | int ret; | |
2049 | ||
2050 | if (skb->len <= ring->tx_copybreak) { | |
2051 | size = skb->len; | |
2052 | type = DESC_TYPE_BOUNCE_ALL; | |
2053 | } | |
2054 | ||
2055 | /* hns3_can_use_tx_bounce() has already been checked, so the |
2056 | * allocation below is guaranteed to return a tx buffer. |
2057 | */ | |
2058 | buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len); | |
2059 | ||
2060 | ret = skb_copy_bits(skb, 0, buf, size); | |
2061 | if (unlikely(ret < 0)) { | |
2062 | hns3_tx_spare_rollback(ring, cb_len); | |
2063 | u64_stats_update_begin(&ring->syncp); | |
2064 | ring->stats.copy_bits_err++; | |
2065 | u64_stats_update_end(&ring->syncp); | |
2066 | return ret; | |
2067 | } | |
2068 | ||
2069 | desc_cb->priv = skb; | |
2070 | desc_cb->length = cb_len; | |
2071 | desc_cb->dma = dma; | |
2072 | desc_cb->type = type; | |
2073 | ||
2074 | bd_num += hns3_fill_desc(ring, dma, size); | |
2075 | ||
2076 | if (type == DESC_TYPE_BOUNCE_HEAD) { | |
2077 | ret = hns3_fill_skb_to_desc(ring, skb, | |
2078 | DESC_TYPE_BOUNCE_HEAD); | |
2079 | if (unlikely(ret < 0)) | |
2080 | return ret; | |
2081 | ||
2082 | bd_num += ret; | |
2083 | } | |
2084 | ||
2085 | dma_sync_single_for_device(ring_to_dev(ring), dma, size, | |
2086 | DMA_TO_DEVICE); | |
2087 | ||
2088 | u64_stats_update_begin(&ring->syncp); | |
2089 | ring->stats.tx_bounce++; | |
2090 | u64_stats_update_end(&ring->syncp); | |
2091 | return bd_num; | |
2092 | } | |
2093 | ||
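
The copybreak logic above picks between copying the whole skb and copying only its linear head. A trivial sketch of that decision (threshold and names illustrative):

#include <assert.h>

enum toy_type { BOUNCE_ALL, BOUNCE_HEAD };

static enum toy_type toy_bounce_type(unsigned int skb_len,
                                     unsigned int copybreak)
{
        /* small packets are copied whole, large ones head-only */
        return skb_len <= copybreak ? BOUNCE_ALL : BOUNCE_HEAD;
}

int main(void)
{
        assert(toy_bounce_type(256, 1024) == BOUNCE_ALL);
        assert(toy_bounce_type(4096, 1024) == BOUNCE_HEAD);
        return 0;
}
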
7459775e YL |
2094 | static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, |
2095 | struct sk_buff *skb) | |
2096 | { | |
2097 | struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; | |
2098 | u32 nfrag = skb_shinfo(skb)->nr_frags + 1; | |
2099 | struct sg_table *sgt; | |
2100 | int i, bd_num = 0; | |
2101 | dma_addr_t dma; | |
2102 | u32 cb_len; | |
2103 | int nents; | |
2104 | ||
2105 | if (skb_has_frag_list(skb)) | |
2106 | nfrag = HNS3_MAX_TSO_BD_NUM; | |
2107 | ||
2108 | /* hns3_can_use_tx_sgl() has already been checked, so the |
2109 | * allocation below is guaranteed to return a tx buffer. |
2110 | */ | |
2111 | sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag), | |
2112 | &dma, &cb_len); | |
2113 | ||
2114 | /* the scatterlist immediately follows the sg table */ |
2115 | sgt->sgl = (struct scatterlist *)(sgt + 1); | |
2116 | sg_init_table(sgt->sgl, nfrag); | |
2117 | nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); | |
2118 | if (unlikely(nents < 0)) { | |
2119 | hns3_tx_spare_rollback(ring, cb_len); | |
2120 | u64_stats_update_begin(&ring->syncp); | |
2121 | ring->stats.skb2sgl_err++; | |
2122 | u64_stats_update_end(&ring->syncp); | |
2123 | return -ENOMEM; | |
2124 | } | |
2125 | ||
2126 | sgt->orig_nents = nents; | |
2127 | sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, | |
2128 | DMA_TO_DEVICE); | |
2129 | if (unlikely(!sgt->nents)) { | |
2130 | hns3_tx_spare_rollback(ring, cb_len); | |
2131 | u64_stats_update_begin(&ring->syncp); | |
2132 | ring->stats.map_sg_err++; | |
2133 | u64_stats_update_end(&ring->syncp); | |
2134 | return -ENOMEM; | |
2135 | } | |
2136 | ||
2137 | desc_cb->priv = skb; | |
2138 | desc_cb->length = cb_len; | |
2139 | desc_cb->dma = dma; | |
2140 | desc_cb->type = DESC_TYPE_SGL_SKB; | |
2141 | ||
2142 | for (i = 0; i < sgt->nents; i++) | |
2143 | bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), | |
2144 | sg_dma_len(sgt->sgl + i)); | |
2145 | ||
2146 | u64_stats_update_begin(&ring->syncp); | |
2147 | ring->stats.tx_sgl++; | |
2148 | u64_stats_update_end(&ring->syncp); | |
2149 | ||
2150 | return bd_num; | |
2151 | } | |
2152 | ||
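
The `sgt->sgl = (struct scatterlist *)(sgt + 1)` line relies on the spare buffer laying the scatterlist array immediately after the sg_table header, which is exactly what HNS3_SGL_SIZE() sizes. A layout-only sketch with toy stand-ins for the kernel types:

#include <assert.h>
#include <stdlib.h>

/* toy stand-ins, layout only; not the real kernel definitions */
struct toy_sg  { unsigned long page_link; unsigned int off, len; };
struct toy_sgt { struct toy_sg *sgl; unsigned int nents, orig_nents; };

#define TOY_SGL_SIZE(nfrag) \
        (sizeof(struct toy_sgt) + sizeof(struct toy_sg) * (nfrag))

int main(void)
{
        unsigned int nfrag = 4;
        struct toy_sgt *sgt = malloc(TOY_SGL_SIZE(nfrag));

        if (!sgt)
                return 1;
        sgt->sgl = (struct toy_sg *)(sgt + 1); /* array right after header */
        assert((char *)sgt->sgl == (char *)sgt + sizeof(*sgt));
        free(sgt);
        return 0;
}
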
907676b1 YL |
2153 | static int hns3_handle_desc_filling(struct hns3_enet_ring *ring, |
2154 | struct sk_buff *skb) | |
2155 | { | |
2156 | u32 space; | |
2157 | ||
2158 | if (!ring->tx_spare) | |
2159 | goto out; | |
2160 | ||
2161 | space = hns3_tx_spare_space(ring); | |
2162 | ||
7459775e YL |
2163 | if (hns3_can_use_tx_sgl(ring, skb, space)) |
2164 | return hns3_handle_tx_sgl(ring, skb); | |
2165 | ||
907676b1 YL |
2166 | if (hns3_can_use_tx_bounce(ring, skb, space)) |
2167 | return hns3_handle_tx_bounce(ring, skb); | |
2168 | ||
2169 | out: | |
2170 | return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); | |
2171 | } | |
2172 | ||
d43e5aca | 2173 | netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) |
76ad4f0e S |
2174 | { |
2175 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
5f06b903 | 2176 | struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; |
811c0830 | 2177 | struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; |
76ad4f0e | 2178 | struct netdev_queue *dev_queue; |
8ae10cfb | 2179 | int pre_ntu, next_to_use_head; |
f6061a05 | 2180 | bool doorbell; |
76ad4f0e | 2181 | int ret; |
76ad4f0e | 2182 | |
36c67349 | 2183 | /* Hardware can only handle frames longer than 32 bytes, so pad short ones */ |
f6061a05 YL |
2184 | if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { |
2185 | hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); | |
97b9e5c1 YL |
2186 | |
2187 | u64_stats_update_begin(&ring->syncp); | |
2188 | ring->stats.sw_err_cnt++; | |
2189 | u64_stats_update_end(&ring->syncp); | |
2190 | ||
36c67349 | 2191 | return NETDEV_TX_OK; |
f6061a05 | 2192 | } |
36c67349 | 2193 | |
76ad4f0e S |
2194 | /* Prefetch the data used later */ |
2195 | prefetch(skb->data); | |
2196 | ||
d1a37ded | 2197 | ret = hns3_nic_maybe_stop_tx(ring, netdev, skb); |
8ae10cfb YL |
2198 | if (unlikely(ret <= 0)) { |
2199 | if (ret == -EBUSY) { | |
f6061a05 | 2200 | hns3_tx_doorbell(ring, 0, true); |
2a597eff | 2201 | return NETDEV_TX_BUSY; |
3d5f3741 | 2202 | } |
76ad4f0e | 2203 | |
8ae10cfb | 2204 | hns3_rl_err(netdev, "xmit error: %d!\n", ret); |
76ad4f0e | 2205 | goto out_err_tx_ok; |
76ad4f0e S |
2206 | } |
2207 | ||
76ad4f0e S |
2208 | next_to_use_head = ring->next_to_use; |
2209 | ||
811c0830 YL |
2210 | ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], |
2211 | desc_cb); | |
cfdaeba5 YL |
2212 | if (unlikely(ret < 0)) |
2213 | goto fill_err; | |
2214 | ||
d5d5e019 YL |
2215 | /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is |
2216 | * zero, which is unlikely, and 'ret > 0' is the number of tx descs |
2217 | * that need to be notified to the hw. |
2218 | */ | |
907676b1 | 2219 | ret = hns3_handle_desc_filling(ring, skb); |
d5d5e019 | 2220 | if (unlikely(ret <= 0)) |
aa9d22dd | 2221 | goto fill_err; |
76ad4f0e | 2222 | |
8ae10cfb YL |
2223 | pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : |
2224 | (ring->desc_num - 1); | |
0bf5eb78 HT |
2225 | |
2226 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) | |
2227 | hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); | |
2228 | ||
8ae10cfb YL |
2229 | ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= |
2230 | cpu_to_le16(BIT(HNS3_TXD_FE_B)); | |
698a8954 | 2231 | trace_hns3_tx_desc(ring, pre_ntu); |
76ad4f0e | 2232 | |
0bf5eb78 HT |
2233 | skb_tx_timestamp(skb); |
2234 | ||
76ad4f0e | 2235 | /* the whole packet has been translated into descriptors */ |
5f06b903 | 2236 | dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); |
811c0830 | 2237 | doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, |
f6061a05 | 2238 | netdev_xmit_more()); |
d5d5e019 | 2239 | hns3_tx_doorbell(ring, ret, doorbell); |
76ad4f0e S |
2240 | |
2241 | return NETDEV_TX_OK; | |
2242 | ||
aa9d22dd | 2243 | fill_err: |
ba3f808f | 2244 | hns3_clear_desc(ring, next_to_use_head); |
76ad4f0e S |
2245 | |
2246 | out_err_tx_ok: | |
2247 | dev_kfree_skb_any(skb); | |
f6061a05 | 2248 | hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); |
76ad4f0e | 2249 | return NETDEV_TX_OK; |
76ad4f0e S |
2250 | } |
2251 | ||
2252 | static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) | |
2253 | { | |
4f331fda YM |
2254 | char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; |
2255 | char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; | |
9780cb97 | 2256 | struct hnae3_handle *h = hns3_get_handle(netdev); |
76ad4f0e S |
2257 | struct sockaddr *mac_addr = p; |
2258 | int ret; | |
2259 | ||
2260 | if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) | |
2261 | return -EADDRNOTAVAIL; | |
2262 | ||
5ec2a51e | 2263 | if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { |
4f331fda YM |
2264 | hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); |
2265 | netdev_info(netdev, "already using mac address %s\n", | |
2266 | format_mac_addr_sa); | |
5ec2a51e JS |
2267 | return 0; |
2268 | } | |
2269 | ||
8e6de441 HT |
2270 | /* For a VF device, if there is a perm_addr, then the user will not |
2271 | * be allowed to change the address. | |
2272 | */ | |
2273 | if (!hns3_is_phys_func(h->pdev) && | |
2274 | !is_zero_ether_addr(netdev->perm_addr)) { | |
4f331fda YM |
2275 | hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); |
2276 | hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); | |
2277 | netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n", |
2278 | format_mac_addr_perm, format_mac_addr_sa); | |
8e6de441 HT |
2279 | return -EPERM; |
2280 | } | |
2281 | ||
59098055 | 2282 | ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); |
76ad4f0e S |
2283 | if (ret) { |
2284 | netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); | |
2285 | return ret; | |
2286 | } | |
2287 | ||
f3956ebb | 2288 | eth_hw_addr_set(netdev, mac_addr->sa_data); |
76ad4f0e S |
2289 | |
2290 | return 0; | |
2291 | } | |
2292 | ||
26483246 XW |
2293 | static int hns3_nic_do_ioctl(struct net_device *netdev, |
2294 | struct ifreq *ifr, int cmd) | |
2295 | { | |
2296 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
2297 | ||
2298 | if (!netif_running(netdev)) | |
2299 | return -EINVAL; | |
2300 | ||
2301 | if (!h->ae_algo->ops->do_ioctl) | |
2302 | return -EOPNOTSUPP; | |
2303 | ||
2304 | return h->ae_algo->ops->do_ioctl(h, ifr, cmd); | |
2305 | } | |
2306 | ||
76ad4f0e S |
2307 | static int hns3_nic_set_features(struct net_device *netdev, |
2308 | netdev_features_t features) | |
2309 | { | |
181d454b | 2310 | netdev_features_t changed = netdev->features ^ features; |
76ad4f0e | 2311 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
052ece6d | 2312 | struct hnae3_handle *h = priv->ae_handle; |
1731be4c | 2313 | bool enable; |
052ece6d | 2314 | int ret; |
76ad4f0e | 2315 | |
5c9f6b39 | 2316 | if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { |
1731be4c YL |
2317 | enable = !!(features & NETIF_F_GRO_HW); |
2318 | ret = h->ae_algo->ops->set_gro_en(h, enable); | |
5c9f6b39 PL |
2319 | if (ret) |
2320 | return ret; | |
2321 | } | |
2322 | ||
bd368416 JS |
2323 | if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && |
2324 | h->ae_algo->ops->enable_hw_strip_rxvtag) { | |
1731be4c YL |
2325 | enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); |
2326 | ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); | |
052ece6d PL |
2327 | if (ret) |
2328 | return ret; | |
2329 | } | |
2330 | ||
c17852a8 | 2331 | if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { |
1731be4c YL |
2332 | enable = !!(features & NETIF_F_NTUPLE); |
2333 | h->ae_algo->ops->enable_fd(h, enable); | |
c17852a8 JS |
2334 | } |
2335 | ||
0205ec04 JS |
2336 | if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && |
2337 | h->ae_algo->ops->cls_flower_active(h)) { | |
2338 | netdev_err(netdev, | |
2339 | "there are offloaded TC filters active, cannot disable HW TC offload"); | |
2340 | return -EINVAL; | |
2341 | } | |
2342 | ||
2ba30662 JS |
2343 | if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && |
2344 | h->ae_algo->ops->enable_vlan_filter) { | |
2345 | enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); | |
2346 | ret = h->ae_algo->ops->enable_vlan_filter(h, enable); | |
2347 | if (ret) | |
2348 | return ret; | |
2349 | } | |
2350 | ||
76ad4f0e S |
2351 | netdev->features = features; |
2352 | return 0; | |
2353 | } | |
2354 | ||
2a7556bb YL |
2355 | static netdev_features_t hns3_features_check(struct sk_buff *skb, |
2356 | struct net_device *dev, | |
2357 | netdev_features_t features) | |
2358 | { | |
2359 | #define HNS3_MAX_HDR_LEN 480U | |
2360 | #define HNS3_MAX_L4_HDR_LEN 60U | |
2361 | ||
2362 | size_t len; | |
2363 | ||
2364 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2365 | return features; | |
2366 | ||
2367 | if (skb->encapsulation) | |
2368 | len = skb_inner_transport_header(skb) - skb->data; | |
2369 | else | |
2370 | len = skb_transport_header(skb) - skb->data; | |
2371 | ||
2372 | /* Assume L4 is 60 bytes, as TCP is the only protocol with a |
2373 | * flexible header length, and its max len is 60 bytes. |
2374 | */ | |
2375 | len += HNS3_MAX_L4_HDR_LEN; | |
2376 | ||
2377 | /* Hardware only supports checksum offload for skbs with a header |
2378 | * len of at most 480 bytes. |
2379 | */ | |
2380 | if (len > HNS3_MAX_HDR_LEN) | |
2381 | features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); | |
2382 | ||
2383 | return features; | |
2384 | } | |
2385 | ||
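
The feature check above budgets 480 bytes for all headers and pessimistically charges 60 bytes for L4. A quick worked check with an illustrative tunnel header offset:

#include <assert.h>

#define HNS3_MAX_HDR_LEN    480U /* HW checksum header budget */
#define HNS3_MAX_L4_HDR_LEN 60U  /* worst-case TCP header */

int main(void)
{
        unsigned int inner_l4_off = 120; /* illustrative inner offset */

        /* still within budget, so csum/GSO offload is kept */
        assert(inner_l4_off + HNS3_MAX_L4_HDR_LEN <= HNS3_MAX_HDR_LEN);
        return 0;
}
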
6c88d9d7 PL |
2386 | static void hns3_nic_get_stats64(struct net_device *netdev, |
2387 | struct rtnl_link_stats64 *stats) | |
76ad4f0e S |
2388 | { |
2389 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
2390 | int queue_num = priv->ae_handle->kinfo.num_tqps; | |
c5f65480 | 2391 | struct hnae3_handle *handle = priv->ae_handle; |
76ad4f0e | 2392 | struct hns3_enet_ring *ring; |
d3ec4ef6 JS |
2393 | u64 rx_length_errors = 0; |
2394 | u64 rx_crc_errors = 0; | |
2395 | u64 rx_multicast = 0; | |
76ad4f0e | 2396 | unsigned int start; |
d3ec4ef6 JS |
2397 | u64 tx_errors = 0; |
2398 | u64 rx_errors = 0; | |
76ad4f0e S |
2399 | unsigned int idx; |
2400 | u64 tx_bytes = 0; | |
2401 | u64 rx_bytes = 0; | |
2402 | u64 tx_pkts = 0; | |
2403 | u64 rx_pkts = 0; | |
d2a5dca8 JS |
2404 | u64 tx_drop = 0; |
2405 | u64 rx_drop = 0; | |
76ad4f0e | 2406 | |
b875cc37 JS |
2407 | if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) |
2408 | return; | |
2409 | ||
c5f65480 JS |
2410 | handle->ae_algo->ops->update_stats(handle, &netdev->stats); |
2411 | ||
76ad4f0e S |
2412 | for (idx = 0; idx < queue_num; idx++) { |
2413 | /* fetch the tx stats */ | |
5f06b903 | 2414 | ring = &priv->ring[idx]; |
76ad4f0e | 2415 | do { |
d36d36ce | 2416 | start = u64_stats_fetch_begin_irq(&ring->syncp); |
76ad4f0e S |
2417 | tx_bytes += ring->stats.tx_bytes; |
2418 | tx_pkts += ring->stats.tx_pkts; | |
d2a5dca8 | 2419 | tx_drop += ring->stats.sw_err_cnt; |
b20d7fe5 YL |
2420 | tx_drop += ring->stats.tx_vlan_err; |
2421 | tx_drop += ring->stats.tx_l4_proto_err; | |
2422 | tx_drop += ring->stats.tx_l2l3l4_err; | |
2423 | tx_drop += ring->stats.tx_tso_err; | |
d5d5e019 YL |
2424 | tx_drop += ring->stats.over_max_recursion; |
2425 | tx_drop += ring->stats.hw_limitation; | |
907676b1 | 2426 | tx_drop += ring->stats.copy_bits_err; |
7459775e YL |
2427 | tx_drop += ring->stats.skb2sgl_err; |
2428 | tx_drop += ring->stats.map_sg_err; | |
d3ec4ef6 | 2429 | tx_errors += ring->stats.sw_err_cnt; |
b20d7fe5 YL |
2430 | tx_errors += ring->stats.tx_vlan_err; |
2431 | tx_errors += ring->stats.tx_l4_proto_err; | |
2432 | tx_errors += ring->stats.tx_l2l3l4_err; | |
2433 | tx_errors += ring->stats.tx_tso_err; | |
d5d5e019 YL |
2434 | tx_errors += ring->stats.over_max_recursion; |
2435 | tx_errors += ring->stats.hw_limitation; | |
907676b1 | 2436 | tx_errors += ring->stats.copy_bits_err; |
7459775e YL |
2437 | tx_errors += ring->stats.skb2sgl_err; |
2438 | tx_errors += ring->stats.map_sg_err; | |
76ad4f0e S |
2439 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); |
2440 | ||
2441 | /* fetch the rx stats */ | |
5f06b903 | 2442 | ring = &priv->ring[idx + queue_num]; |
76ad4f0e | 2443 | do { |
d36d36ce | 2444 | start = u64_stats_fetch_begin_irq(&ring->syncp); |
76ad4f0e S |
2445 | rx_bytes += ring->stats.rx_bytes; |
2446 | rx_pkts += ring->stats.rx_pkts; | |
d2a5dca8 | 2447 | rx_drop += ring->stats.l2_err; |
d3ec4ef6 | 2448 | rx_errors += ring->stats.l2_err; |
8b552079 | 2449 | rx_errors += ring->stats.l3l4_csum_err; |
d3ec4ef6 | 2450 | rx_crc_errors += ring->stats.l2_err; |
d3ec4ef6 JS |
2451 | rx_multicast += ring->stats.rx_multicast; |
2452 | rx_length_errors += ring->stats.err_pkt_len; | |
76ad4f0e S |
2453 | } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); |
2454 | } | |
2455 | ||
2456 | stats->tx_bytes = tx_bytes; | |
2457 | stats->tx_packets = tx_pkts; | |
2458 | stats->rx_bytes = rx_bytes; | |
2459 | stats->rx_packets = rx_pkts; | |
2460 | ||
d3ec4ef6 JS |
2461 | stats->rx_errors = rx_errors; |
2462 | stats->multicast = rx_multicast; | |
2463 | stats->rx_length_errors = rx_length_errors; | |
2464 | stats->rx_crc_errors = rx_crc_errors; | |
76ad4f0e S |
2465 | stats->rx_missed_errors = netdev->stats.rx_missed_errors; |
2466 | ||
d3ec4ef6 JS |
2467 | stats->tx_errors = tx_errors; |
2468 | stats->rx_dropped = rx_drop; | |
2469 | stats->tx_dropped = tx_drop; | |
76ad4f0e S |
2470 | stats->collisions = netdev->stats.collisions; |
2471 | stats->rx_over_errors = netdev->stats.rx_over_errors; | |
2472 | stats->rx_frame_errors = netdev->stats.rx_frame_errors; | |
2473 | stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; | |
2474 | stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; | |
2475 | stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; | |
2476 | stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; | |
2477 | stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; | |
2478 | stats->tx_window_errors = netdev->stats.tx_window_errors; | |
2479 | stats->rx_compressed = netdev->stats.rx_compressed; | |
2480 | stats->tx_compressed = netdev->stats.tx_compressed; | |
2481 | } | |
2482 | ||
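
The per-ring loops above use the u64_stats fetch/retry idiom: read the counters, then retry if the sequence count moved (or is odd, i.e. a writer is mid-update). A compact userspace analogue with a toy seqcount (all names hypothetical):

#include <stdatomic.h>

struct toy_stats {
        atomic_uint seq; /* toy seqcount: odd = write in progress */
        unsigned long long pkts, bytes;
};

static void toy_read(struct toy_stats *s,
                     unsigned long long *pkts, unsigned long long *bytes)
{
        unsigned int start;

        do {
                start = atomic_load(&s->seq); /* u64_stats_fetch_begin */
                *pkts = s->pkts;
                *bytes = s->bytes;
        } while ((start & 1) || atomic_load(&s->seq) != start);
        /* the retry condition plays the role of u64_stats_fetch_retry */
}

int main(void)
{
        struct toy_stats s = { 0, 1234, 56789 };
        unsigned long long p, b;

        toy_read(&s, &p, &b);
        return (int)(p - 1234); /* 0 on a consistent snapshot */
}
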
30d240df | 2483 | static int hns3_setup_tc(struct net_device *netdev, void *type_data) |
76ad4f0e | 2484 | { |
30d240df | 2485 | struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; |
75718800 | 2486 | struct hnae3_knic_private_info *kinfo; |
30d240df YL |
2487 | u8 tc = mqprio_qopt->qopt.num_tc; |
2488 | u16 mode = mqprio_qopt->mode; | |
2489 | u8 hw = mqprio_qopt->qopt.hw; | |
75718800 | 2490 | struct hnae3_handle *h; |
76ad4f0e | 2491 | |
30d240df YL |
2492 | if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && |
2493 | mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) | |
2494 | return -EOPNOTSUPP; | |
2495 | ||
76ad4f0e S |
2496 | if (tc > HNAE3_MAX_TC) |
2497 | return -EINVAL; | |
2498 | ||
76ad4f0e S |
2499 | if (!netdev) |
2500 | return -EINVAL; | |
2501 | ||
75718800 YL |
2502 | h = hns3_get_handle(netdev); |
2503 | kinfo = &h->kinfo; | |
2504 | ||
1c822948 YL |
2505 | netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); |
2506 | ||
1cce5eb6 | 2507 | return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? |
5a5c9091 | 2508 | kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; |
76ad4f0e S |
2509 | } |
2510 | ||
0205ec04 JS |
2511 | static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, |
2512 | struct flow_cls_offload *flow) | |
2513 | { | |
2514 | int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); | |
2515 | struct hnae3_handle *h = hns3_get_handle(priv->netdev); | |
2516 | ||
2517 | switch (flow->command) { | |
2518 | case FLOW_CLS_REPLACE: | |
2519 | if (h->ae_algo->ops->add_cls_flower) | |
2520 | return h->ae_algo->ops->add_cls_flower(h, flow, tc); | |
2521 | break; | |
2522 | case FLOW_CLS_DESTROY: | |
2523 | if (h->ae_algo->ops->del_cls_flower) | |
2524 | return h->ae_algo->ops->del_cls_flower(h, flow); | |
2525 | break; | |
2526 | default: | |
2527 | break; | |
2528 | } | |
2529 | ||
2530 | return -EOPNOTSUPP; | |
2531 | } | |
2532 | ||
2533 | static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, | |
2534 | void *cb_priv) | |
2535 | { | |
2536 | struct hns3_nic_priv *priv = cb_priv; | |
2537 | ||
2538 | if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) | |
2539 | return -EOPNOTSUPP; | |
2540 | ||
2541 | switch (type) { | |
2542 | case TC_SETUP_CLSFLOWER: | |
2543 | return hns3_setup_tc_cls_flower(priv, type_data); | |
2544 | default: | |
2545 | return -EOPNOTSUPP; | |
2546 | } | |
2547 | } | |
2548 | ||
2549 | static LIST_HEAD(hns3_block_cb_list); | |
2550 | ||
2572ac53 | 2551 | static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, |
de4784ca | 2552 | void *type_data) |
76ad4f0e | 2553 | { |
0205ec04 JS |
2554 | struct hns3_nic_priv *priv = netdev_priv(dev); |
2555 | int ret; | |
2556 | ||
2557 | switch (type) { | |
2558 | case TC_SETUP_QDISC_MQPRIO: | |
2559 | ret = hns3_setup_tc(dev, type_data); | |
2560 | break; | |
2561 | case TC_SETUP_BLOCK: | |
2562 | ret = flow_block_cb_setup_simple(type_data, | |
2563 | &hns3_block_cb_list, | |
2564 | hns3_setup_tc_block_cb, | |
2565 | priv, priv, true); | |
2566 | break; | |
2567 | default: | |
38cf0426 | 2568 | return -EOPNOTSUPP; |
0205ec04 | 2569 | } |
76ad4f0e | 2570 | |
0205ec04 | 2571 | return ret; |
76ad4f0e S |
2572 | } |
2573 | ||
2574 | static int hns3_vlan_rx_add_vid(struct net_device *netdev, | |
2575 | __be16 proto, u16 vid) | |
2576 | { | |
9780cb97 | 2577 | struct hnae3_handle *h = hns3_get_handle(netdev); |
76ad4f0e S |
2578 | int ret = -EIO; |
2579 | ||
2580 | if (h->ae_algo->ops->set_vlan_filter) | |
2581 | ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); | |
2582 | ||
2583 | return ret; | |
2584 | } | |
2585 | ||
2586 | static int hns3_vlan_rx_kill_vid(struct net_device *netdev, | |
2587 | __be16 proto, u16 vid) | |
2588 | { | |
9780cb97 | 2589 | struct hnae3_handle *h = hns3_get_handle(netdev); |
76ad4f0e S |
2590 | int ret = -EIO; |
2591 | ||
2592 | if (h->ae_algo->ops->set_vlan_filter) | |
2593 | ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); | |
2594 | ||
7fa6be4f | 2595 | return ret; |
681ec399 YL |
2596 | } |
2597 | ||
76ad4f0e S |
2598 | static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, |
2599 | u8 qos, __be16 vlan_proto) | |
2600 | { | |
9780cb97 | 2601 | struct hnae3_handle *h = hns3_get_handle(netdev); |
76ad4f0e S |
2602 | int ret = -EIO; |
2603 | ||
1c822948 | 2604 | netif_dbg(h, drv, netdev, |
39edaf24 GL |
2605 | "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", |
2606 | vf, vlan, qos, ntohs(vlan_proto)); | |
1c822948 | 2607 | |
76ad4f0e S |
2608 | if (h->ae_algo->ops->set_vf_vlan_filter) |
2609 | ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, | |
9b2f3477 | 2610 | qos, vlan_proto); |
76ad4f0e S |
2611 | |
2612 | return ret; | |
2613 | } | |
2614 | ||
22044f95 JS |
2615 | static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) |
2616 | { | |
2617 | struct hnae3_handle *handle = hns3_get_handle(netdev); | |
2618 | ||
2619 | if (hns3_nic_resetting(netdev)) | |
2620 | return -EBUSY; | |
2621 | ||
2622 | if (!handle->ae_algo->ops->set_vf_spoofchk) | |
2623 | return -EOPNOTSUPP; | |
2624 | ||
2625 | return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); | |
2626 | } | |
2627 | ||
e196ec75 JS |
2628 | static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) |
2629 | { | |
2630 | struct hnae3_handle *handle = hns3_get_handle(netdev); | |
2631 | ||
2632 | if (!handle->ae_algo->ops->set_vf_trust) | |
2633 | return -EOPNOTSUPP; | |
2634 | ||
2635 | return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); | |
2636 | } | |
2637 | ||
a8e8b7ff S |
2638 | static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) |
2639 | { | |
9780cb97 | 2640 | struct hnae3_handle *h = hns3_get_handle(netdev); |
a8e8b7ff S |
2641 | int ret; |
2642 | ||
6ff7ed80 HT |
2643 | if (hns3_nic_resetting(netdev)) |
2644 | return -EBUSY; | |
2645 | ||
a8e8b7ff S |
2646 | if (!h->ae_algo->ops->set_mtu) |
2647 | return -EOPNOTSUPP; | |
2648 | ||
1c822948 YL |
2649 | netif_dbg(h, drv, netdev, |
2650 | "change mtu from %u to %d\n", netdev->mtu, new_mtu); | |
2651 | ||
a8e8b7ff | 2652 | ret = h->ae_algo->ops->set_mtu(h, new_mtu); |
93d8daf4 | 2653 | if (ret) |
a8e8b7ff S |
2654 | netdev_err(netdev, "failed to change MTU in hardware %d\n", |
2655 | ret); | |
93d8daf4 YL |
2656 | else |
2657 | netdev->mtu = new_mtu; | |
5bad95a1 | 2658 | |
a8e8b7ff S |
2659 | return ret; |
2660 | } | |
2661 | ||
f8fa222c L |
2662 | static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) |
2663 | { | |
2664 | struct hns3_nic_priv *priv = netdev_priv(ndev); | |
e511c97d | 2665 | struct hnae3_handle *h = hns3_get_handle(ndev); |
0bfdf286 | 2666 | struct hns3_enet_ring *tx_ring; |
e511c97d | 2667 | struct napi_struct *napi; |
f8fa222c L |
2668 | int timeout_queue = 0; |
2669 | int hw_head, hw_tail; | |
e511c97d JS |
2670 | int fbd_num, fbd_oft; |
2671 | int ebd_num, ebd_oft; | |
2672 | int bd_num, bd_err; | |
2673 | int ring_en, tc; | |
f8fa222c L |
2674 | int i; |
2675 | ||
2676 | /* Find the stopped queue the same way the stack does */ | |
fa6c4084 | 2677 | for (i = 0; i < ndev->num_tx_queues; i++) { |
f8fa222c L |
2678 | struct netdev_queue *q; |
2679 | unsigned long trans_start; | |
2680 | ||
2681 | q = netdev_get_tx_queue(ndev, i); | |
5337824f | 2682 | trans_start = READ_ONCE(q->trans_start); |
f8fa222c L |
2683 | if (netif_xmit_stopped(q) && |
2684 | time_after(jiffies, | |
2685 | (trans_start + ndev->watchdog_timeo))) { | |
db596298 YM |
2686 | #ifdef CONFIG_BQL |
2687 | struct dql *dql = &q->dql; | |
2688 | ||
2689 | netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", | |
2690 | dql->last_obj_cnt, dql->num_queued, | |
2691 | dql->adj_limit, dql->num_completed); | |
2692 | #endif | |
f8fa222c | 2693 | timeout_queue = i; |
647522a5 YL |
2694 | netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", |
2695 | q->state, | |
2696 | jiffies_to_msecs(jiffies - trans_start)); | |
f8fa222c L |
2697 | break; |
2698 | } | |
2699 | } | |
2700 | ||
2701 | if (i == ndev->num_tx_queues) { | |
2702 | netdev_info(ndev, | |
2703 | "no netdev TX timeout queue found, timeout count: %llu\n", | |
2704 | priv->tx_timeout_count); | |
2705 | return false; | |
2706 | } | |
2707 | ||
beab694a JS |
2708 | priv->tx_timeout_count++; |
2709 | ||
5f06b903 | 2710 | tx_ring = &priv->ring[timeout_queue]; |
e511c97d JS |
2711 | napi = &tx_ring->tqp_vector->napi; |
2712 | ||
2713 | netdev_info(ndev, | |
2714 | "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", | |
2715 | priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, | |
2716 | tx_ring->next_to_clean, napi->state); | |
2717 | ||
2718 | netdev_info(ndev, | |
20d06ca2 | 2719 | "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n", |
e511c97d | 2720 | tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, |
20d06ca2 | 2721 | tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); |
e511c97d JS |
2722 | |
2723 | netdev_info(ndev, | |
f6061a05 YL |
2724 | "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", |
2725 | tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, | |
e511c97d JS |
2726 | tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); |
2727 | ||
2728 | /* When the mac receives many pause frames back-to-back, it is unable |
2729 | * to send packets, which may cause a tx timeout |
2730 | */ | |
615466ce YM |
2731 | if (h->ae_algo->ops->get_mac_stats) { |
2732 | struct hns3_mac_stats mac_stats; | |
e511c97d | 2733 | |
615466ce | 2734 | h->ae_algo->ops->get_mac_stats(h, &mac_stats); |
e511c97d | 2735 | netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", |
615466ce | 2736 | mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); |
e511c97d | 2737 | } |
f8fa222c L |
2738 | |
2739 | hw_head = readl_relaxed(tx_ring->tqp->io_base + | |
2740 | HNS3_RING_TX_RING_HEAD_REG); | |
2741 | hw_tail = readl_relaxed(tx_ring->tqp->io_base + | |
2742 | HNS3_RING_TX_RING_TAIL_REG); | |
e511c97d JS |
2743 | fbd_num = readl_relaxed(tx_ring->tqp->io_base + |
2744 | HNS3_RING_TX_RING_FBDNUM_REG); | |
2745 | fbd_oft = readl_relaxed(tx_ring->tqp->io_base + | |
2746 | HNS3_RING_TX_RING_OFFSET_REG); | |
2747 | ebd_num = readl_relaxed(tx_ring->tqp->io_base + | |
2748 | HNS3_RING_TX_RING_EBDNUM_REG); | |
2749 | ebd_oft = readl_relaxed(tx_ring->tqp->io_base + | |
2750 | HNS3_RING_TX_RING_EBD_OFFSET_REG); | |
2751 | bd_num = readl_relaxed(tx_ring->tqp->io_base + | |
2752 | HNS3_RING_TX_RING_BD_NUM_REG); | |
2753 | bd_err = readl_relaxed(tx_ring->tqp->io_base + | |
2754 | HNS3_RING_TX_RING_BD_ERR_REG); | |
2755 | ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); | |
2756 | tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); | |
2757 | ||
f8fa222c | 2758 | netdev_info(ndev, |
e511c97d JS |
2759 | "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", |
2760 | bd_num, hw_head, hw_tail, bd_err, | |
f8fa222c | 2761 | readl(tx_ring->tqp_vector->mask_addr)); |
e511c97d JS |
2762 | netdev_info(ndev, |
2763 | "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", | |
2764 | ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); | |
f8fa222c L |
2765 | |
2766 | return true; | |
2767 | } | |
2768 | ||
0290bd29 | 2769 | static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue) |
f8fa222c L |
2770 | { |
2771 | struct hns3_nic_priv *priv = netdev_priv(ndev); | |
f8fa222c L |
2772 | struct hnae3_handle *h = priv->ae_handle; |
2773 | ||
2774 | if (!hns3_get_tx_timeo_queue_info(ndev)) | |
2775 | return; | |
2776 | ||
0742ed7c HT |
2777 | /* request the reset, and let hclge determine |
2778 | * which reset level should be done | |
2779 | */ | |
f8fa222c | 2780 | if (h->ae_algo->ops->reset_event) |
6ae4e733 | 2781 | h->ae_algo->ops->reset_event(h->pdev, h); |
f8fa222c L |
2782 | } |
2783 | ||
d93ed94f JS |
2784 | #ifdef CONFIG_RFS_ACCEL |
2785 | static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | |
2786 | u16 rxq_index, u32 flow_id) | |
2787 | { | |
2788 | struct hnae3_handle *h = hns3_get_handle(dev); | |
2789 | struct flow_keys fkeys; | |
2790 | ||
2791 | if (!h->ae_algo->ops->add_arfs_entry) | |
2792 | return -EOPNOTSUPP; | |
2793 | ||
2794 | if (skb->encapsulation) | |
2795 | return -EPROTONOSUPPORT; | |
2796 | ||
2797 | if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0)) | |
2798 | return -EPROTONOSUPPORT; | |
2799 | ||
2800 | if ((fkeys.basic.n_proto != htons(ETH_P_IP) && | |
2801 | fkeys.basic.n_proto != htons(ETH_P_IPV6)) || | |
2802 | (fkeys.basic.ip_proto != IPPROTO_TCP && | |
2803 | fkeys.basic.ip_proto != IPPROTO_UDP)) | |
2804 | return -EPROTONOSUPPORT; | |
2805 | ||
2806 | return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); | |
2807 | } | |
2808 | #endif | |
2809 | ||
6430f744 YM |
2810 | static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, |
2811 | struct ifla_vf_info *ivf) | |
2812 | { | |
2813 | struct hnae3_handle *h = hns3_get_handle(ndev); | |
2814 | ||
2815 | if (!h->ae_algo->ops->get_vf_config) | |
2816 | return -EOPNOTSUPP; | |
2817 | ||
2818 | return h->ae_algo->ops->get_vf_config(h, vf, ivf); | |
2819 | } | |
2820 | ||
2821 | static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, | |
2822 | int link_state) | |
2823 | { | |
2824 | struct hnae3_handle *h = hns3_get_handle(ndev); | |
2825 | ||
2826 | if (!h->ae_algo->ops->set_vf_link_state) | |
2827 | return -EOPNOTSUPP; | |
2828 | ||
2829 | return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); | |
2830 | } | |
2831 | ||
ee9e4424 YL |
2832 | static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, |
2833 | int min_tx_rate, int max_tx_rate) | |
2834 | { | |
2835 | struct hnae3_handle *h = hns3_get_handle(ndev); | |
2836 | ||
2837 | if (!h->ae_algo->ops->set_vf_rate) | |
2838 | return -EOPNOTSUPP; | |
2839 | ||
2840 | return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, | |
2841 | false); | |
2842 | } | |
2843 | ||
8e6de441 HT |
2844 | static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) |
2845 | { | |
2846 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
4f331fda | 2847 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
8e6de441 HT |
2848 | |
2849 | if (!h->ae_algo->ops->set_vf_mac) | |
2850 | return -EOPNOTSUPP; | |
2851 | ||
2852 | if (is_multicast_ether_addr(mac)) { | |
4f331fda | 2853 | hnae3_format_mac_addr(format_mac_addr, mac); |
8e6de441 | 2854 | netdev_err(netdev, |
4f331fda YM |
2855 | "Invalid MAC:%s specified. Could not set MAC\n", |
2856 | format_mac_addr); | |
8e6de441 HT |
2857 | return -EINVAL; |
2858 | } | |
2859 | ||
2860 | return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); | |
2861 | } | |
2862 | ||
76ad4f0e S |
2863 | static const struct net_device_ops hns3_nic_netdev_ops = { |
2864 | .ndo_open = hns3_nic_net_open, | |
2865 | .ndo_stop = hns3_nic_net_stop, | |
2866 | .ndo_start_xmit = hns3_nic_net_xmit, | |
f8fa222c | 2867 | .ndo_tx_timeout = hns3_nic_net_timeout, |
76ad4f0e | 2868 | .ndo_set_mac_address = hns3_nic_net_set_mac_address, |
a7605370 | 2869 | .ndo_eth_ioctl = hns3_nic_do_ioctl, |
a8e8b7ff | 2870 | .ndo_change_mtu = hns3_nic_change_mtu, |
76ad4f0e | 2871 | .ndo_set_features = hns3_nic_set_features, |
2a7556bb | 2872 | .ndo_features_check = hns3_features_check, |
76ad4f0e S |
2873 | .ndo_get_stats64 = hns3_nic_get_stats64, |
2874 | .ndo_setup_tc = hns3_nic_setup_tc, | |
2875 | .ndo_set_rx_mode = hns3_nic_set_rx_mode, | |
76ad4f0e S |
2876 | .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, |
2877 | .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, | |
2878 | .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, | |
22044f95 | 2879 | .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, |
e196ec75 | 2880 | .ndo_set_vf_trust = hns3_set_vf_trust, |
d93ed94f JS |
2881 | #ifdef CONFIG_RFS_ACCEL |
2882 | .ndo_rx_flow_steer = hns3_rx_flow_steer, | |
2883 | #endif | |
6430f744 YM |
2884 | .ndo_get_vf_config = hns3_nic_get_vf_config, |
2885 | .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, | |
ee9e4424 | 2886 | .ndo_set_vf_rate = hns3_nic_set_vf_rate, |
8e6de441 | 2887 | .ndo_set_vf_mac = hns3_nic_set_vf_mac, |
76ad4f0e S |
2888 | }; |
2889 | ||
97afd47b | 2890 | bool hns3_is_phys_func(struct pci_dev *pdev) |
2312e050 FL |
2891 | { |
2892 | u32 dev_id = pdev->device; | |
2893 | ||
2894 | switch (dev_id) { | |
2895 | case HNAE3_DEV_ID_GE: | |
2896 | case HNAE3_DEV_ID_25GE: | |
2897 | case HNAE3_DEV_ID_25GE_RDMA: | |
2898 | case HNAE3_DEV_ID_25GE_RDMA_MACSEC: | |
2899 | case HNAE3_DEV_ID_50GE_RDMA: | |
2900 | case HNAE3_DEV_ID_50GE_RDMA_MACSEC: | |
2901 | case HNAE3_DEV_ID_100G_RDMA_MACSEC: | |
ae6f010c | 2902 | case HNAE3_DEV_ID_200G_RDMA: |
2312e050 | 2903 | return true; |
c155e22b GH |
2904 | case HNAE3_DEV_ID_VF: |
2905 | case HNAE3_DEV_ID_RDMA_DCB_PFC_VF: | |
2312e050 FL |
2906 | return false; |
2907 | default: | |
adcf738b | 2908 | dev_warn(&pdev->dev, "unrecognized pci device-id %u", |
2312e050 FL |
2909 | dev_id); |
2910 | } | |
2911 | ||
2912 | return false; | |
2913 | } | |
2914 | ||
2312e050 FL |
2915 | static void hns3_disable_sriov(struct pci_dev *pdev) |
2916 | { | |
2917 | /* If our VFs are assigned we cannot shut down SR-IOV | |
2918 | * without causing issues, so just leave the hardware | |
2919 | * available but disabled | |
2920 | */ | |
2921 | if (pci_vfs_assigned(pdev)) { | |
2922 | dev_warn(&pdev->dev, | |
2923 | "disabling driver while VFs are assigned\n"); | |
2924 | return; | |
2925 | } | |
2926 | ||
2927 | pci_disable_sriov(pdev); | |
2928 | } | |
2929 | ||
76ad4f0e S |
2930 | /* hns3_probe - Device initialization routine |
2931 | * @pdev: PCI device information struct | |
2932 | * @ent: entry in hns3_pci_tbl | |
2933 | * | |
2934 | * hns3_probe initializes a PF identified by a pci_dev structure. | |
2935 | * The OS initialization, configuring of the PF private structure, | |
2936 | * and a hardware reset occur. | |
2937 | * | |
2938 | * Returns 0 on success, negative on failure | |
2939 | */ | |
2940 | static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
2941 | { | |
2942 | struct hnae3_ae_dev *ae_dev; | |
2943 | int ret; | |
2944 | ||
9b2f3477 | 2945 | ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); |
60df7e91 HT |
2946 | if (!ae_dev) |
2947 | return -ENOMEM; | |
76ad4f0e S |
2948 | |
2949 | ae_dev->pdev = pdev; | |
e92a0843 | 2950 | ae_dev->flag = ent->driver_data; |
76ad4f0e S |
2951 | pci_set_drvdata(pdev, ae_dev); |
2952 | ||
74354140 | 2953 | ret = hnae3_register_ae_dev(ae_dev); |
674a1357 | 2954 | if (ret) |
74354140 | 2955 | pci_set_drvdata(pdev, NULL); |
2312e050 | 2956 | |
74354140 | 2957 | return ret; |
76ad4f0e S |
2958 | } |
2959 | ||
2960 | /* hns3_remove - Device removal routine | |
2961 | * @pdev: PCI device information struct | |
2962 | */ | |
2963 | static void hns3_remove(struct pci_dev *pdev) | |
2964 | { | |
2965 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
2966 | ||
2312e050 FL |
2967 | if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) |
2968 | hns3_disable_sriov(pdev); | |
2969 | ||
76ad4f0e | 2970 | hnae3_unregister_ae_dev(ae_dev); |
ac864c23 | 2971 | pci_set_drvdata(pdev, NULL); |
76ad4f0e S |
2972 | } |
2973 | ||
fa8d82e8 PL |
2974 | /** |
2975 | * hns3_pci_sriov_configure - enable or change the number of VFs |
2976 | * @pdev: pointer to a pci_dev structure | |
2977 | * @num_vfs: number of VFs to allocate | |
2978 | * | |
2979 | * Enable or change the number of VFs. Called when the user updates the number | |
2980 | * of VFs in sysfs. | |
2981 | **/ | |
743e1a84 | 2982 | static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) |
fa8d82e8 PL |
2983 | { |
2984 | int ret; | |
2985 | ||
2986 | if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { | |
2987 | dev_warn(&pdev->dev, "Cannot configure SR-IOV\n"); |
2988 | return -EINVAL; | |
2989 | } | |
2990 | ||
2991 | if (num_vfs) { | |
2992 | ret = pci_enable_sriov(pdev, num_vfs); | |
2993 | if (ret) | |
2994 | dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); | |
743e1a84 SM |
2995 | else |
2996 | return num_vfs; | |
fa8d82e8 PL |
2997 | } else if (!pci_vfs_assigned(pdev)) { |
2998 | pci_disable_sriov(pdev); | |
2999 | } else { | |
3000 | dev_warn(&pdev->dev, | |
3001 | "Unable to free VFs because some are assigned to VMs.\n"); | |
3002 | } | |
3003 | ||
3004 | return 0; | |
3005 | } | |
3006 | ||
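For reference, this callback is reached from the PCI core's sriov_numvfs_store() when user space writes the standard sriov_numvfs sysfs attribute; a usage note with an illustrative device address follows.

/* Illustrative usage (the device address is an example only):
 *   echo 4 > /sys/bus/pci/devices/0000:7d:00.0/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:7d:00.0/sriov_numvfs   # disable, if no VF is assigned
 */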
ce2c1d2e YL |
3007 | static void hns3_shutdown(struct pci_dev *pdev) |
3008 | { | |
3009 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
3010 | ||
3011 | hnae3_unregister_ae_dev(ae_dev); | |
ce2c1d2e YL |
3012 | pci_set_drvdata(pdev, NULL); |
3013 | ||
3014 | if (system_state == SYSTEM_POWER_OFF) | |
3015 | pci_set_power_state(pdev, PCI_D3hot); | |
3016 | } | |
3017 | ||
715c58e9 JZ |
3018 | static int __maybe_unused hns3_suspend(struct device *dev) |
3019 | { | |
3020 | struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); | |
3021 | ||
d0494135 | 3022 | if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { |
715c58e9 | 3023 | dev_info(dev, "Begin to suspend.\n"); |
d0494135 | 3024 | if (ae_dev->ops && ae_dev->ops->reset_prepare) |
715c58e9 JZ |
3025 | ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); |
3026 | } | |
3027 | ||
3028 | return 0; | |
3029 | } | |
3030 | ||
3031 | static int __maybe_unused hns3_resume(struct device *dev) | |
3032 | { | |
3033 | struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev); | |
3034 | ||
d0494135 | 3035 | if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { |
715c58e9 | 3036 | dev_info(dev, "Begin to resume.\n"); |
d0494135 | 3037 | if (ae_dev->ops && ae_dev->ops->reset_done) |
715c58e9 JZ |
3038 | ae_dev->ops->reset_done(ae_dev); |
3039 | } | |
3040 | ||
3041 | return 0; | |
3042 | } | |
3043 | ||
5a9f0eac SJ |
3044 | static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, |
3045 | pci_channel_state_t state) | |
3046 | { | |
3047 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
3048 | pci_ers_result_t ret; | |
3049 | ||
c5aaf176 | 3050 | dev_info(&pdev->dev, "PCI error detected, state = %u\n", state); |
5a9f0eac SJ |
3051 | |
3052 | if (state == pci_channel_io_perm_failure) | |
3053 | return PCI_ERS_RESULT_DISCONNECT; | |
3054 | ||
661262bc | 3055 | if (!ae_dev || !ae_dev->ops) { |
5a9f0eac | 3056 | dev_err(&pdev->dev, |
661262bc | 3057 | "Can't recover - error happened before device initialized\n"); |
5a9f0eac SJ |
3058 | return PCI_ERS_RESULT_NONE; |
3059 | } | |
3060 | ||
381c356e SJ |
3061 | if (ae_dev->ops->handle_hw_ras_error) |
3062 | ret = ae_dev->ops->handle_hw_ras_error(ae_dev); | |
5a9f0eac SJ |
3063 | else |
3064 | return PCI_ERS_RESULT_NONE; | |
3065 | ||
3066 | return ret; | |
3067 | } | |
3068 | ||
6ae4e733 SJ |
3069 | static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) |
3070 | { | |
3071 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
ad9bf545 | 3072 | const struct hnae3_ae_ops *ops; |
123297b7 | 3073 | enum hnae3_reset_type reset_type; |
6ae4e733 SJ |
3074 | struct device *dev = &pdev->dev; |
3075 | ||
661262bc WL |
3076 | if (!ae_dev || !ae_dev->ops) |
3077 | return PCI_ERS_RESULT_NONE; | |
3078 | ||
ad9bf545 | 3079 | ops = ae_dev->ops; |
6ae4e733 | 3080 | /* request the reset */ |
fa17c708 GH |
3081 | if (ops->reset_event && ops->get_reset_level && |
3082 | ops->set_default_reset_request) { | |
9d5e67d1 | 3083 | if (ae_dev->hw_err_reset_req) { |
123297b7 SJ |
3084 | reset_type = ops->get_reset_level(ae_dev, |
3085 | &ae_dev->hw_err_reset_req); | |
3086 | ops->set_default_reset_request(ae_dev, reset_type); | |
3087 | dev_info(dev, "requesting reset due to PCI error\n"); | |
3088 | ops->reset_event(pdev, NULL); | |
3089 | } | |
69b51bbb | 3090 | |
6ae4e733 SJ |
3091 | return PCI_ERS_RESULT_RECOVERED; |
3092 | } | |
3093 | ||
3094 | return PCI_ERS_RESULT_DISCONNECT; | |
3095 | } | |
3096 | ||
6b9a97ee HT |
3097 | static void hns3_reset_prepare(struct pci_dev *pdev) |
3098 | { | |
3099 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
3100 | ||
8de91e92 | 3101 | dev_info(&pdev->dev, "FLR prepare\n"); |
bb1890d5 JZ |
3102 | if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) |
3103 | ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); | |
6b9a97ee HT |
3104 | } |
3105 | ||
3106 | static void hns3_reset_done(struct pci_dev *pdev) | |
3107 | { | |
3108 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); | |
3109 | ||
8de91e92 | 3110 | dev_info(&pdev->dev, "FLR done\n"); |
bb1890d5 JZ |
3111 | if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) |
3112 | ae_dev->ops->reset_done(ae_dev); | |
6b9a97ee HT |
3113 | } |
3114 | ||
5a9f0eac SJ |
3115 | static const struct pci_error_handlers hns3_err_handler = { |
3116 | .error_detected = hns3_error_detected, | |
6ae4e733 | 3117 | .slot_reset = hns3_slot_reset, |
6b9a97ee HT |
3118 | .reset_prepare = hns3_reset_prepare, |
3119 | .reset_done = hns3_reset_done, | |
5a9f0eac SJ |
3120 | }; |
3121 | ||
715c58e9 JZ |
3122 | static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume); |
3123 | ||
76ad4f0e S |
3124 | static struct pci_driver hns3_driver = { |
3125 | .name = hns3_driver_name, | |
3126 | .id_table = hns3_pci_tbl, | |
3127 | .probe = hns3_probe, | |
3128 | .remove = hns3_remove, | |
ce2c1d2e | 3129 | .shutdown = hns3_shutdown, |
715c58e9 | 3130 | .driver.pm = &hns3_pm_ops, |
fa8d82e8 | 3131 | .sriov_configure = hns3_pci_sriov_configure, |
5a9f0eac | 3132 | .err_handler = &hns3_err_handler, |
76ad4f0e S |
3133 | }; |
3134 | ||
3135 | /* set default features for hns3 */ |
3136 | static void hns3_set_default_feature(struct net_device *netdev) | |
3137 | { | |
3e85af6a PL |
3138 | struct hnae3_handle *h = hns3_get_handle(netdev); |
3139 | struct pci_dev *pdev = h->pdev; | |
295ba232 | 3140 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
3e85af6a | 3141 | |
76ad4f0e S |
3142 | netdev->priv_flags |= IFF_UNICAST_FLT; |
3143 | ||
76ad4f0e S |
3144 | netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; |
3145 | ||
66d52f3b | 3146 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | |
052ece6d | 3147 | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | |
76ad4f0e S |
3148 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | |
3149 | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | | |
3150 | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | | |
57e72c12 | 3151 | NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; |
76ad4f0e | 3152 | |
295ba232 | 3153 | if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { |
5c9f6b39 | 3154 | netdev->features |= NETIF_F_GRO_HW; |
c17852a8 | 3155 | |
dc9b5ce0 | 3156 | if (!(h->flags & HNAE3_SUPPORT_VF)) |
c17852a8 | 3157 | netdev->features |= NETIF_F_NTUPLE; |
c17852a8 | 3158 | } |
0692cfe9 | 3159 | |
dc9b5ce0 | 3160 | if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) |
0692cfe9 | 3161 | netdev->features |= NETIF_F_GSO_UDP_L4; |
66d52f3b | 3162 | |
dc9b5ce0 | 3163 | if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) |
66d52f3b | 3164 | netdev->features |= NETIF_F_HW_CSUM; |
dc9b5ce0 | 3165 | else |
66d52f3b | 3166 | netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
3e281621 | 3167 | |
dc9b5ce0 | 3168 | if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) |
3e281621 | 3169 | netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; |
0205ec04 | 3170 | |
dc9b5ce0 | 3171 | if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) |
0205ec04 | 3172 | netdev->features |= NETIF_F_HW_TC; |
2ba30662 | 3173 | |
dc9b5ce0 JS |
3174 | netdev->hw_features |= netdev->features; |
3175 | if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) | |
3176 | netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; | |
3177 | ||
3178 | netdev->vlan_features |= netdev->features & | |
3179 | ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | | |
3180 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | | |
3181 | NETIF_F_HW_TC); | |
3182 | ||
3183 | netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; | |
76ad4f0e S |
3184 | } |
3185 | ||
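As a reminder of the core netdev semantics the function above relies on (standard kernel behavior, not hns3-specific):

/* - features:        offloads currently active on the device
 * - hw_features:     offloads the user may toggle with ethtool -K
 * - vlan_features:   offloads inherited by stacked VLAN devices
 * - hw_enc_features: offloads usable on encapsulated (tunnel) traffic
 */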
3186 | static int hns3_alloc_buffer(struct hns3_enet_ring *ring, | |
3187 | struct hns3_desc_cb *cb) | |
3188 | { | |
dbba6da0 | 3189 | unsigned int order = hns3_page_order(ring); |
76ad4f0e S |
3190 | struct page *p; |
3191 | ||
93188e96 YL |
3192 | if (ring->page_pool) { |
3193 | p = page_pool_dev_alloc_frag(ring->page_pool, | |
3194 | &cb->page_offset, | |
3195 | hns3_buf_size(ring)); | |
3196 | if (unlikely(!p)) | |
3197 | return -ENOMEM; | |
3198 | ||
3199 | cb->priv = p; | |
3200 | cb->buf = page_address(p); | |
3201 | cb->dma = page_pool_get_dma_addr(p); | |
3202 | cb->type = DESC_TYPE_PP_FRAG; | |
3203 | cb->reuse_flag = 0; | |
3204 | return 0; | |
3205 | } | |
3206 | ||
76ad4f0e S |
3207 | p = dev_alloc_pages(order); |
3208 | if (!p) | |
3209 | return -ENOMEM; | |
3210 | ||
3211 | cb->priv = p; | |
3212 | cb->page_offset = 0; | |
3213 | cb->reuse_flag = 0; | |
3214 | cb->buf = page_address(p); | |
dbba6da0 | 3215 | cb->length = hns3_page_size(ring); |
76ad4f0e | 3216 | cb->type = DESC_TYPE_PAGE; |
aeda9bf8 YL |
3217 | page_ref_add(p, USHRT_MAX - 1); |
3218 | cb->pagecnt_bias = USHRT_MAX; | |
76ad4f0e | 3219 | |
76ad4f0e S |
3220 | return 0; |
3221 | } | |
3222 | ||
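The page-pool branch above assumes a pool created with DMA mapping and the frag API enabled, so that page_pool_get_dma_addr() and page_pool_dev_alloc_frag() are valid. A sketch of such a setup with illustrative parameters (the real pool is created elsewhere in this driver; demo_create_pool is a hypothetical name):

static struct page_pool *demo_create_pool(struct device *dev, u32 pool_size)
{
	struct page_pool_params pp_params = {
		/* DMA_MAP makes page_pool_get_dma_addr() valid,
		 * PAGE_FRAG enables page_pool_dev_alloc_frag()
		 */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG,
		.order = 0,
		.pool_size = pool_size,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);
}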
3223 | static void hns3_free_buffer(struct hns3_enet_ring *ring, | |
619ae331 | 3224 | struct hns3_desc_cb *cb, int budget) |
76ad4f0e | 3225 | { |
907676b1 | 3226 | if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | |
7459775e | 3227 | DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) |
619ae331 | 3228 | napi_consume_skb(cb->priv, budget); |
93188e96 YL |
3229 | else if (!HNAE3_IS_TX_RING(ring)) { |
3230 | if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) | |
3231 | __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); | |
3232 | else if (cb->type & DESC_TYPE_PP_FRAG) | |
3233 | page_pool_put_full_page(ring->page_pool, cb->priv, | |
3234 | false); | |
3235 | } | |
76ad4f0e S |
3236 | memset(cb, 0, sizeof(*cb)); |
3237 | } | |
3238 | ||
3239 | static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) | |
3240 | { | |
3241 | cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, | |
3242 | cb->length, ring_to_dma_dir(ring)); | |
3243 | ||
2211f4e1 | 3244 | if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) |
76ad4f0e S |
3245 | return -EIO; |
3246 | ||
3247 | return 0; | |
3248 | } | |
3249 | ||
3250 | static void hns3_unmap_buffer(struct hns3_enet_ring *ring, | |
3251 | struct hns3_desc_cb *cb) | |
3252 | { | |
26f1ccdf | 3253 | if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) |
76ad4f0e S |
3254 | dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, |
3255 | ring_to_dma_dir(ring)); | |
907676b1 | 3256 | else if ((cb->type & DESC_TYPE_PAGE) && cb->length) |
76ad4f0e S |
3257 | dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, |
3258 | ring_to_dma_dir(ring)); | |
7459775e YL |
3259 | else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | |
3260 | DESC_TYPE_SGL_SKB)) | |
907676b1 | 3261 | hns3_tx_spare_reclaim_cb(ring, cb); |
76ad4f0e S |
3262 | } |
3263 | ||
3264 | static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i) | |
3265 | { | |
3266 | hns3_unmap_buffer(ring, &ring->desc_cb[i]); | |
3267 | ring->desc[i].addr = 0; | |
9f9f0f19 | 3268 | ring->desc_cb[i].refill = 0; |
76ad4f0e S |
3269 | } |
3270 | ||
619ae331 YL |
3271 | static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, |
3272 | int budget) | |
76ad4f0e S |
3273 | { |
3274 | struct hns3_desc_cb *cb = &ring->desc_cb[i]; | |
3275 | ||
3276 | if (!ring->desc_cb[i].dma) | |
3277 | return; | |
3278 | ||
3279 | hns3_buffer_detach(ring, i); | |
619ae331 | 3280 | hns3_free_buffer(ring, cb, budget); |
76ad4f0e S |
3281 | } |
3282 | ||
3283 | static void hns3_free_buffers(struct hns3_enet_ring *ring) | |
3284 | { | |
3285 | int i; | |
3286 | ||
3287 | for (i = 0; i < ring->desc_num; i++) | |
619ae331 | 3288 | hns3_free_buffer_detach(ring, i, 0); |
76ad4f0e S |
3289 | } |
3290 | ||
3291 | /* free desc along with its attached buffer */ | |
3292 | static void hns3_free_desc(struct hns3_enet_ring *ring) | |
3293 | { | |
024cc792 HT |
3294 | int size = ring->desc_num * sizeof(ring->desc[0]); |
3295 | ||
76ad4f0e S |
3296 | hns3_free_buffers(ring); |
3297 | ||
024cc792 HT |
3298 | if (ring->desc) { |
3299 | dma_free_coherent(ring_to_dev(ring), size, | |
3300 | ring->desc, ring->desc_dma_addr); | |
3301 | ring->desc = NULL; | |
3302 | } | |
76ad4f0e S |
3303 | } |
3304 | ||
3305 | static int hns3_alloc_desc(struct hns3_enet_ring *ring) | |
3306 | { | |
3307 | int size = ring->desc_num * sizeof(ring->desc[0]); | |
3308 | ||
750afb08 LC |
3309 | ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, |
3310 | &ring->desc_dma_addr, GFP_KERNEL); | |
76ad4f0e S |
3311 | if (!ring->desc) |
3312 | return -ENOMEM; | |
3313 | ||
76ad4f0e S |
3314 | return 0; |
3315 | } | |
3316 | ||
4d2cad32 | 3317 | static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, |
76ad4f0e S |
3318 | struct hns3_desc_cb *cb) |
3319 | { | |
3320 | int ret; | |
3321 | ||
3322 | ret = hns3_alloc_buffer(ring, cb); | |
93188e96 | 3323 | if (ret || ring->page_pool) |
76ad4f0e S |
3324 | goto out; |
3325 | ||
3326 | ret = hns3_map_buffer(ring, cb); | |
3327 | if (ret) | |
3328 | goto out_with_buf; | |
3329 | ||
3330 | return 0; | |
3331 | ||
3332 | out_with_buf: | |
619ae331 | 3333 | hns3_free_buffer(ring, cb, 0); |
76ad4f0e S |
3334 | out: |
3335 | return ret; | |
3336 | } | |
3337 | ||
4d2cad32 | 3338 | static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) |
76ad4f0e | 3339 | { |
4d2cad32 | 3340 | int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); |
76ad4f0e S |
3341 | |
3342 | if (ret) | |
3343 | return ret; | |
3344 | ||
93188e96 YL |
3345 | ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + |
3346 | ring->desc_cb[i].page_offset); | |
9f9f0f19 | 3347 | ring->desc_cb[i].refill = 1; |
76ad4f0e S |
3348 | |
3349 | return 0; | |
3350 | } | |
3351 | ||
3352 | /* Allocate memory for the raw packet buffers and map them for DMA */ |
3353 | static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) | |
3354 | { | |
3355 | int i, j, ret; | |
3356 | ||
3357 | for (i = 0; i < ring->desc_num; i++) { | |
4d2cad32 | 3358 | ret = hns3_alloc_and_attach_buffer(ring, i); |
76ad4f0e S |
3359 | if (ret) |
3360 | goto out_buffer_fail; | |
3361 | } | |
3362 | ||
3363 | return 0; | |
3364 | ||
3365 | out_buffer_fail: | |
3366 | for (j = i - 1; j >= 0; j--) | |
619ae331 | 3367 | hns3_free_buffer_detach(ring, j, 0); |
76ad4f0e S |
3368 | return ret; |
3369 | } | |
3370 | ||
9b2f3477 | 3371 | /* detach an in-use buffer and replace it with a reserved one */ |
76ad4f0e S |
3372 | static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, |
3373 | struct hns3_desc_cb *res_cb) | |
3374 | { | |
b9077428 | 3375 | hns3_unmap_buffer(ring, &ring->desc_cb[i]); |
76ad4f0e | 3376 | ring->desc_cb[i] = *res_cb; |
9f9f0f19 | 3377 | ring->desc_cb[i].refill = 1; |
93188e96 YL |
3378 | ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + |
3379 | ring->desc_cb[i].page_offset); | |
7d0b130c | 3380 | ring->desc[i].rx.bd_base_info = 0; |
76ad4f0e S |
3381 | } |
3382 | ||
3383 | static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) | |
3384 | { | |
3385 | ring->desc_cb[i].reuse_flag = 0; | |
9f9f0f19 | 3386 | ring->desc_cb[i].refill = 1; |
9b2f3477 WL |
3387 | ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + |
3388 | ring->desc_cb[i].page_offset); | |
7d0b130c | 3389 | ring->desc[i].rx.bd_base_info = 0; |
c2a2e127 BS |
3390 | |
3391 | dma_sync_single_for_device(ring_to_dev(ring), | |
3392 | ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, | |
3393 | hns3_buf_size(ring), | |
3394 | DMA_FROM_DEVICE); | |
76ad4f0e S |
3395 | } |
3396 | ||
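The dma_sync_single_for_device() call above follows the streaming-DMA ownership rule:

/* Ownership rule (DMA-API): dma_sync_single_for_cpu() hands a mapped
 * buffer to the CPU before the driver reads the received data, and
 * dma_sync_single_for_device() hands it back before the hardware may
 * DMA into it again; touching the buffer while the device owns it is
 * undefined behavior on non-coherent platforms.
 */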
20d06ca2 | 3397 | static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, |
619ae331 | 3398 | int *bytes, int *pkts, int budget) |
76ad4f0e | 3399 | { |
20d06ca2 YL |
3400 | /* Pairs with the ring->last_to_use update in hns3_tx_doorbell(); |
3401 | * smp_store_release() is not used in hns3_tx_doorbell() because |
3402 | * the doorbell operation already has the needed barrier. |
3403 | */ | |
3404 | int ltu = smp_load_acquire(&ring->last_to_use); | |
26cda2f1 YL |
3405 | int ntc = ring->next_to_clean; |
3406 | struct hns3_desc_cb *desc_cb; | |
20d06ca2 YL |
3407 | bool reclaimed = false; |
3408 | struct hns3_desc *desc; | |
3409 | ||
3410 | while (ltu != ntc) { | |
3411 | desc = &ring->desc[ntc]; | |
3412 | ||
3413 | if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & | |
3414 | BIT(HNS3_TXD_VLD_B)) | |
3415 | break; | |
76ad4f0e | 3416 | |
ce74370c | 3417 | desc_cb = &ring->desc_cb[ntc]; |
811c0830 | 3418 | |
907676b1 | 3419 | if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | |
7459775e YL |
3420 | DESC_TYPE_BOUNCE_HEAD | |
3421 | DESC_TYPE_SGL_SKB)) { | |
811c0830 YL |
3422 | (*pkts)++; |
3423 | (*bytes) += desc_cb->send_bytes; | |
3424 | } | |
3425 | ||
ce74370c | 3426 | /* desc_cb will be cleaned up after hns3_free_buffer_detach() */ |
619ae331 | 3427 | hns3_free_buffer_detach(ring, ntc, budget); |
76ad4f0e | 3428 | |
ce74370c YL |
3429 | if (++ntc == ring->desc_num) |
3430 | ntc = 0; | |
3431 | ||
3432 | /* Issue prefetch for next Tx descriptor */ | |
3433 | prefetch(&ring->desc_cb[ntc]); | |
20d06ca2 | 3434 | reclaimed = true; |
ce74370c | 3435 | } |
26cda2f1 | 3436 | |
20d06ca2 YL |
3437 | if (unlikely(!reclaimed)) |
3438 | return false; | |
3439 | ||
26cda2f1 YL |
3440 | /* This smp_store_release() pairs with smp_load_acquire() in |
3441 | * ring_space called by hns3_nic_net_xmit. | |
3442 | */ | |
3443 | smp_store_release(&ring->next_to_clean, ntc); | |
907676b1 YL |
3444 | |
3445 | hns3_tx_spare_update(ring); | |
3446 | ||
20d06ca2 | 3447 | return true; |
76ad4f0e S |
3448 | } |
3449 | ||
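A minimal sketch of the acquire/release pairing described in the comment above, using a hypothetical demo_ring (not driver code); in the real TX path the doorbell write supplies the release-side barrier instead of smp_store_release():

struct demo_ring {
	int last_to_use;	/* written by the xmit path */
	int next_to_clean;	/* written by the clean path */
};

static void demo_produce(struct demo_ring *r, int new_ltu)
{
	/* ... descriptor writes for the new buffers go here ... */
	smp_store_release(&r->last_to_use, new_ltu);
}

static int demo_consume(struct demo_ring *r)
{
	int ltu = smp_load_acquire(&r->last_to_use);

	/* descriptors up to ltu are now safe to read; the count below
	 * ignores index wrap-around for brevity
	 */
	return ltu - READ_ONCE(r->next_to_clean);
}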
619ae331 | 3450 | void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) |
76ad4f0e | 3451 | { |
c8711956 | 3452 | struct net_device *netdev = ring_to_netdev(ring); |
7a810110 | 3453 | struct hns3_nic_priv *priv = netdev_priv(netdev); |
76ad4f0e S |
3454 | struct netdev_queue *dev_queue; |
3455 | int bytes, pkts; | |
76ad4f0e S |
3456 | |
3457 | bytes = 0; | |
3458 | pkts = 0; | |
20d06ca2 | 3459 | |
619ae331 | 3460 | if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget))) |
20d06ca2 | 3461 | return; |
76ad4f0e S |
3462 | |
3463 | ring->tqp_vector->tx_group.total_bytes += bytes; | |
3464 | ring->tqp_vector->tx_group.total_packets += pkts; | |
3465 | ||
3466 | u64_stats_update_begin(&ring->syncp); | |
3467 | ring->stats.tx_bytes += bytes; | |
3468 | ring->stats.tx_pkts += pkts; | |
3469 | u64_stats_update_end(&ring->syncp); | |
3470 | ||
3471 | dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); | |
3472 | netdev_tx_completed_queue(dev_queue, pkts, bytes); | |
3473 | ||
2a597eff | 3474 | if (unlikely(netif_carrier_ok(netdev) && |
8ae10cfb | 3475 | ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { |
76ad4f0e S |
3476 | /* Make sure that anybody stopping the queue after this |
3477 | * sees the new next_to_clean. | |
3478 | */ | |
3479 | smp_mb(); | |
7a810110 JS |
3480 | if (netif_tx_queue_stopped(dev_queue) && |
3481 | !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { | |
76ad4f0e S |
3482 | netif_tx_wake_queue(dev_queue); |
3483 | ring->stats.restart_queue++; | |
3484 | } | |
3485 | } | |
76ad4f0e S |
3486 | } |
3487 | ||
3488 | static int hns3_desc_unused(struct hns3_enet_ring *ring) | |
3489 | { | |
3490 | int ntc = ring->next_to_clean; | |
3491 | int ntu = ring->next_to_use; | |
3492 | ||
9f9f0f19 YL |
3493 | if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) |
3494 | return ring->desc_num; | |
3495 | ||
76ad4f0e S |
3496 | return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; |
3497 | } | |
3498 | ||
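A worked example of the circular-index arithmetic above, with assumed values:

/* Assume desc_num = 512.
 *   ntc = 10,  ntu = 500: ntc < ntu,  unused = 512 + 10 - 500 = 22
 *   ntc = 300, ntu = 100: ntc >= ntu, unused = 0 + 300 - 100 = 200
 * i.e. the number of descriptors that have been consumed but not yet
 * refilled by the driver.
 */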
68752b24 YL |
3499 | /* Return true if there is any allocation failure */ |
3500 | static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, | |
9b2f3477 | 3501 | int cleand_count) |
76ad4f0e S |
3502 | { |
3503 | struct hns3_desc_cb *desc_cb; | |
3504 | struct hns3_desc_cb res_cbs; | |
3505 | int i, ret; | |
3506 | ||
3507 | for (i = 0; i < cleand_count; i++) { | |
3508 | desc_cb = &ring->desc_cb[ring->next_to_use]; | |
3509 | if (desc_cb->reuse_flag) { | |
3510 | u64_stats_update_begin(&ring->syncp); | |
3511 | ring->stats.reuse_pg_cnt++; | |
3512 | u64_stats_update_end(&ring->syncp); | |
3513 | ||
3514 | hns3_reuse_buffer(ring, ring->next_to_use); | |
3515 | } else { | |
4d2cad32 | 3516 | ret = hns3_alloc_and_map_buffer(ring, &res_cbs); |
76ad4f0e S |
3517 | if (ret) { |
3518 | u64_stats_update_begin(&ring->syncp); | |
3519 | ring->stats.sw_err_cnt++; | |
3520 | u64_stats_update_end(&ring->syncp); | |
3521 | ||
c8711956 | 3522 | hns3_rl_err(ring_to_netdev(ring), |
b20d7fe5 YL |
3523 | "alloc rx buffer failed: %d\n", |
3524 | ret); | |
68752b24 YL |
3525 | |
3526 | writel(i, ring->tqp->io_base + | |
3527 | HNS3_RING_RX_RING_HEAD_REG); | |
3528 | return true; | |
76ad4f0e S |
3529 | } |
3530 | hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); | |
d21ff4f9 YL |
3531 | |
3532 | u64_stats_update_begin(&ring->syncp); | |
3533 | ring->stats.non_reuse_pg++; | |
3534 | u64_stats_update_end(&ring->syncp); | |
76ad4f0e S |
3535 | } |
3536 | ||
3537 | ring_ptr_move_fw(ring, next_to_use); | |
3538 | } | |
3539 | ||
48ee56fd | 3540 | writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); |
68752b24 | 3541 | return false; |
76ad4f0e S |
3542 | } |
3543 | ||
aeda9bf8 YL |
3544 | static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) |
3545 | { | |
fa7711b8 | 3546 | return page_count(cb->priv) == cb->pagecnt_bias; |
aeda9bf8 YL |
3547 | } |
3548 | ||
76ad4f0e S |
3549 | static void hns3_nic_reuse_page(struct sk_buff *skb, int i, |
3550 | struct hns3_enet_ring *ring, int pull_len, | |
3551 | struct hns3_desc_cb *desc_cb) | |
3552 | { | |
389ca146 | 3553 | struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; |
fa7711b8 | 3554 | u32 frag_offset = desc_cb->page_offset + pull_len; |
389ca146 | 3555 | int size = le16_to_cpu(desc->rx.size); |
dbba6da0 | 3556 | u32 truesize = hns3_buf_size(ring); |
fa7711b8 | 3557 | u32 frag_size = size - pull_len; |
96104500 | 3558 | bool reused; |
76ad4f0e | 3559 | |
93188e96 YL |
3560 | if (ring->page_pool) { |
3561 | skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, | |
3562 | frag_size, truesize); | |
3563 | return; | |
3564 | } | |
3565 | ||
fa7711b8 YL |
3566 | /* Avoid re-using remote or pfmem page */ |
3567 | if (unlikely(!dev_page_is_reusable(desc_cb->priv))) | |
3568 | goto out; | |
76ad4f0e | 3569 | |
96104500 YL |
3570 | reused = hns3_can_reuse_page(desc_cb); |
3571 | ||
3572 | /* The Rx page can be reused when: |
3573 | * 1. the page is only owned by the driver and page_offset is |
3574 | * zero: the buffer at offset 0 (size truesize) will be used by |
3575 | * the stack after skb_add_rx_frag() is called, and the rest of |
3576 | * the page can be reused by the driver. |
3577 | * Or |
3578 | * 2. the page is only owned by the driver and page_offset is |
3579 | * non-zero: the buffer at page_offset (size truesize) will be |
3580 | * used by the stack after skb_add_rx_frag() is called, and the |
3581 | * buffer at offset 0 can be reused by the driver. |
389ca146 | 3582 | */ |
96104500 YL |
3583 | if ((!desc_cb->page_offset && reused) || |
3584 | ((desc_cb->page_offset + truesize + truesize) <= | |
3585 | hns3_page_size(ring) && desc_cb->page_offset)) { | |
fa7711b8 | 3586 | desc_cb->page_offset += truesize; |
389ca146 | 3587 | desc_cb->reuse_flag = 1; |
96104500 YL |
3588 | } else if (desc_cb->page_offset && reused) { |
3589 | desc_cb->page_offset = 0; | |
3590 | desc_cb->reuse_flag = 1; | |
99f6b5fb YL |
3591 | } else if (frag_size <= ring->rx_copybreak) { |
3592 | void *frag = napi_alloc_frag(frag_size); | |
3593 | ||
3594 | if (unlikely(!frag)) { | |
3595 | u64_stats_update_begin(&ring->syncp); | |
3596 | ring->stats.frag_alloc_err++; | |
3597 | u64_stats_update_end(&ring->syncp); | |
3598 | ||
3599 | hns3_rl_err(ring_to_netdev(ring), | |
3600 | "failed to allocate rx frag\n"); | |
3601 | goto out; | |
3602 | } | |
3603 | ||
3604 | desc_cb->reuse_flag = 1; | |
3605 | memcpy(frag, desc_cb->buf + frag_offset, frag_size); | |
3606 | skb_add_rx_frag(skb, i, virt_to_page(frag), | |
3607 | offset_in_page(frag), frag_size, frag_size); | |
3608 | ||
3609 | u64_stats_update_begin(&ring->syncp); | |
3610 | ring->stats.frag_alloc++; | |
3611 | u64_stats_update_end(&ring->syncp); | |
3612 | return; | |
aeda9bf8 YL |
3613 | } |
3614 | ||
fa7711b8 YL |
3615 | out: |
3616 | desc_cb->pagecnt_bias--; | |
3617 | ||
aeda9bf8 YL |
3618 | if (unlikely(!desc_cb->pagecnt_bias)) { |
3619 | page_ref_add(desc_cb->priv, USHRT_MAX); | |
3620 | desc_cb->pagecnt_bias = USHRT_MAX; | |
76ad4f0e | 3621 | } |
fa7711b8 YL |
3622 | |
3623 | skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, | |
3624 | frag_size, truesize); | |
3625 | ||
3626 | if (unlikely(!desc_cb->reuse_flag)) | |
3627 | __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); | |
76ad4f0e S |
3628 | } |
3629 | ||
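A sketch of the reference-count amortization that hns3_can_reuse_page() and the pagecnt_bias bookkeeping above implement; the demo_* helper names are hypothetical:

static void demo_page_init(struct hns3_desc_cb *cb, struct page *p)
{
	cb->priv = p;
	/* take USHRT_MAX references up front (one is already held),
	 * so per-fragment handoff to the stack only decrements the
	 * local bias instead of touching the atomic refcount
	 */
	page_ref_add(p, USHRT_MAX - 1);
	cb->pagecnt_bias = USHRT_MAX;
}

static bool demo_page_reusable(struct hns3_desc_cb *cb)
{
	/* reusable only when all outstanding references are ours */
	return page_count(cb->priv) == cb->pagecnt_bias;
}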
e2ee1c5a | 3630 | static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) |
d474d88f YL |
3631 | { |
3632 | __be16 type = skb->protocol; | |
3633 | struct tcphdr *th; | |
3634 | int depth = 0; | |
3635 | ||
e2ee1c5a | 3636 | while (eth_type_vlan(type)) { |
d474d88f YL |
3637 | struct vlan_hdr *vh; |
3638 | ||
3639 | if ((depth + VLAN_HLEN) > skb_headlen(skb)) | |
3640 | return -EFAULT; | |
3641 | ||
3642 | vh = (struct vlan_hdr *)(skb->data + depth); | |
3643 | type = vh->h_vlan_encapsulated_proto; | |
3644 | depth += VLAN_HLEN; | |
3645 | } | |
3646 | ||
e2ee1c5a YL |
3647 | skb_set_network_header(skb, depth); |
3648 | ||
d474d88f | 3649 | if (type == htons(ETH_P_IP)) { |
e2ee1c5a YL |
3650 | const struct iphdr *iph = ip_hdr(skb); |
3651 | ||
d474d88f | 3652 | depth += sizeof(struct iphdr); |
e2ee1c5a YL |
3653 | skb_set_transport_header(skb, depth); |
3654 | th = tcp_hdr(skb); | |
3655 | th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, | |
3656 | iph->daddr, 0); | |
d474d88f | 3657 | } else if (type == htons(ETH_P_IPV6)) { |
e2ee1c5a YL |
3658 | const struct ipv6hdr *iph = ipv6_hdr(skb); |
3659 | ||
d474d88f | 3660 | depth += sizeof(struct ipv6hdr); |
e2ee1c5a YL |
3661 | skb_set_transport_header(skb, depth); |
3662 | th = tcp_hdr(skb); | |
3663 | th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, | |
3664 | &iph->daddr, 0); | |
d474d88f | 3665 | } else { |
b20d7fe5 YL |
3666 | hns3_rl_err(skb->dev, |
3667 | "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", | |
3668 | be16_to_cpu(type), depth); | |
d474d88f YL |
3669 | return -EFAULT; |
3670 | } | |
3671 | ||
d474d88f YL |
3672 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
3673 | if (th->cwr) | |
3674 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | |
3675 | ||
e2ee1c5a YL |
3676 | if (l234info & BIT(HNS3_RXD_GRO_FIXID_B)) |
3677 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; | |
d474d88f | 3678 | |
e2ee1c5a YL |
3679 | skb->csum_start = (unsigned char *)th - skb->head; |
3680 | skb->csum_offset = offsetof(struct tcphdr, check); | |
3681 | skb->ip_summed = CHECKSUM_PARTIAL; | |
698a8954 YL |
3682 | |
3683 | trace_hns3_gro(skb); | |
3684 | ||
d474d88f YL |
3685 | return 0; |
3686 | } | |
3687 | ||
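To make the CHECKSUM_PARTIAL setup above concrete:

/* With ip_summed == CHECKSUM_PARTIAL, the stack (or a capable device)
 * checksums the data starting csum_start bytes from skb->head and
 * folds the 16-bit result into the field at csum_start + csum_offset,
 * i.e. directly into tcphdr->check here, whose seed was set to the
 * pseudo-header checksum by the ~tcp_v4_check()/~tcp_v6_check()
 * calls above.
 */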
1ddc028a HT |
3688 | static bool hns3_checksum_complete(struct hns3_enet_ring *ring, |
3689 | struct sk_buff *skb, u32 ptype, u16 csum) | |
4b2fe769 | 3690 | { |
1ddc028a HT |
3691 | if (ptype == HNS3_INVALID_PTYPE || |
3692 | hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) | |
3693 | return false; | |
4b2fe769 HT |
3694 | |
3695 | u64_stats_update_begin(&ring->syncp); | |
3696 | ring->stats.csum_complete++; | |
3697 | u64_stats_update_end(&ring->syncp); | |
3698 | skb->ip_summed = CHECKSUM_COMPLETE; | |
1ddc028a HT |
3699 | skb->csum = csum_unfold((__force __sum16)csum); |
3700 | ||
3701 | return true; | |
4b2fe769 HT |
3702 | } |
3703 | ||
79664077 HT |
3704 | static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info, |
3705 | u32 ol_info, u32 ptype) | |
76ad4f0e | 3706 | { |
76ad4f0e | 3707 | int l3_type, l4_type; |
76ad4f0e | 3708 | int ol4_type; |
76ad4f0e | 3709 | |
79664077 HT |
3710 | if (ptype != HNS3_INVALID_PTYPE) { |
3711 | skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; | |
3712 | skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; | |
76ad4f0e S |
3713 | |
3714 | return; | |
3715 | } | |
3716 | ||
39c38824 | 3717 | ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, |
e4e87715 | 3718 | HNS3_RXD_OL4ID_S); |
76ad4f0e S |
3719 | switch (ol4_type) { |
3720 | case HNS3_OL4_TYPE_MAC_IN_UDP: | |
3721 | case HNS3_OL4_TYPE_NVGRE: | |
3722 | skb->csum_level = 1; | |
df561f66 | 3723 | fallthrough; |
76ad4f0e | 3724 | case HNS3_OL4_TYPE_NO_TUN: |
47e7b13b YL |
3725 | l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, |
3726 | HNS3_RXD_L3ID_S); | |
3727 | l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, | |
3728 | HNS3_RXD_L4ID_S); | |
76ad4f0e | 3729 | /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ |
94c5e532 PL |
3730 | if ((l3_type == HNS3_L3_TYPE_IPV4 || |
3731 | l3_type == HNS3_L3_TYPE_IPV6) && | |
3732 | (l4_type == HNS3_L4_TYPE_UDP || | |
3733 | l4_type == HNS3_L4_TYPE_TCP || | |
3734 | l4_type == HNS3_L4_TYPE_SCTP)) | |
76ad4f0e S |
3735 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3736 | break; | |
fa7a4bd5 JS |
3737 | default: |
3738 | break; | |
76ad4f0e S |
3739 | } |
3740 | } | |
3741 | ||
79664077 | 3742 | static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, |
1ddc028a HT |
3743 | u32 l234info, u32 bd_base_info, u32 ol_info, |
3744 | u16 csum) | |
79664077 HT |
3745 | { |
3746 | struct net_device *netdev = ring_to_netdev(ring); | |
3747 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
3748 | u32 ptype = HNS3_INVALID_PTYPE; | |
3749 | ||
3750 | skb->ip_summed = CHECKSUM_NONE; | |
3751 | ||
3752 | skb_checksum_none_assert(skb); | |
3753 | ||
3754 | if (!(netdev->features & NETIF_F_RXCSUM)) | |
3755 | return; | |
3756 | ||
3757 | if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) | |
3758 | ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, | |
3759 | HNS3_RXD_PTYPE_S); | |
3760 | ||
1ddc028a | 3761 | if (hns3_checksum_complete(ring, skb, ptype, csum)) |
79664077 | 3762 | return; |
79664077 HT |
3763 | |
3764 | /* check if hardware has done checksum */ | |
3765 | if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) | |
3766 | return; | |
3767 | ||
3768 | if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | | |
3769 | BIT(HNS3_RXD_OL3E_B) | | |
3770 | BIT(HNS3_RXD_OL4E_B)))) { | |
3771 | u64_stats_update_begin(&ring->syncp); | |
3772 | ring->stats.l3l4_csum_err++; | |
3773 | u64_stats_update_end(&ring->syncp); | |
3774 | ||
3775 | return; | |
3776 | } | |
3777 | ||
3778 | hns3_rx_handle_csum(skb, l234info, ol_info, ptype); | |
3779 | } | |
3780 | ||
d43e5aca YL |
3781 | static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) |
3782 | { | |
81ae0e04 PL |
3783 | if (skb_has_frag_list(skb)) |
3784 | napi_gro_flush(&ring->tqp_vector->napi, false); | |
3785 | ||
d43e5aca YL |
3786 | napi_gro_receive(&ring->tqp_vector->napi, skb); |
3787 | } | |
3788 | ||
701a6d6a JS |
3789 | static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, |
3790 | struct hns3_desc *desc, u32 l234info, | |
3791 | u16 *vlan_tag) | |
5b5455a9 | 3792 | { |
44e626f7 | 3793 | struct hnae3_handle *handle = ring->tqp->handle; |
5b5455a9 | 3794 | struct pci_dev *pdev = ring->tqp->handle->pdev; |
295ba232 | 3795 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
5b5455a9 | 3796 | |
295ba232 | 3797 | if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { |
701a6d6a JS |
3798 | *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); |
3799 | if (!(*vlan_tag & VLAN_VID_MASK)) | |
3800 | *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); | |
5b5455a9 | 3801 | |
701a6d6a | 3802 | return (*vlan_tag != 0); |
5b5455a9 PL |
3803 | } |
3804 | ||
3805 | #define HNS3_STRP_OUTER_VLAN 0x1 | |
3806 | #define HNS3_STRP_INNER_VLAN 0x2 | |
44e626f7 | 3807 | #define HNS3_STRP_BOTH 0x3 |
5b5455a9 | 3808 | |
44e626f7 JS |
3809 | /* Hardware always inserts the VLAN tag into the RX descriptor |
3810 | * when it strips the tag from the packet, so the driver needs |
3811 | * to determine which tag to report to the stack. |
3812 | */ | |
e4e87715 PL |
3813 | switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, |
3814 | HNS3_RXD_STRP_TAGP_S)) { | |
5b5455a9 | 3815 | case HNS3_STRP_OUTER_VLAN: |
44e626f7 JS |
3816 | if (handle->port_base_vlan_state != |
3817 | HNAE3_PORT_BASE_VLAN_DISABLE) | |
3818 | return false; | |
3819 | ||
701a6d6a JS |
3820 | *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); |
3821 | return true; | |
5b5455a9 | 3822 | case HNS3_STRP_INNER_VLAN: |
44e626f7 JS |
3823 | if (handle->port_base_vlan_state != |
3824 | HNAE3_PORT_BASE_VLAN_DISABLE) | |
3825 | return false; | |
3826 | ||
701a6d6a | 3827 | *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); |
44e626f7 JS |
3828 | return true; |
3829 | case HNS3_STRP_BOTH: | |
3830 | if (handle->port_base_vlan_state == | |
3831 | HNAE3_PORT_BASE_VLAN_DISABLE) | |
3832 | *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); | |
3833 | else | |
3834 | *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); | |
3835 | ||
701a6d6a | 3836 | return true; |
5b5455a9 | 3837 | default: |
701a6d6a | 3838 | return false; |
5b5455a9 | 3839 | } |
5b5455a9 PL |
3840 | } |
3841 | ||
8c30e194 YL |
3842 | static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) |
3843 | { | |
3844 | ring->desc[ring->next_to_clean].rx.bd_base_info &= | |
3845 | cpu_to_le32(~BIT(HNS3_RXD_VLD_B)); | |
9f9f0f19 | 3846 | ring->desc_cb[ring->next_to_clean].refill = 0; |
8c30e194 YL |
3847 | ring->next_to_clean += 1; |
3848 | ||
3849 | if (unlikely(ring->next_to_clean == ring->desc_num)) | |
3850 | ring->next_to_clean = 0; | |
3851 | } | |
3852 | ||
b9a8f883 | 3853 | static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, |
e5597095 PL |
3854 | unsigned char *va) |
3855 | { | |
e5597095 | 3856 | struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; |
c8711956 | 3857 | struct net_device *netdev = ring_to_netdev(ring); |
e5597095 PL |
3858 | struct sk_buff *skb; |
3859 | ||
3860 | ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); | |
3861 | skb = ring->skb; | |
3862 | if (unlikely(!skb)) { | |
b20d7fe5 | 3863 | hns3_rl_err(netdev, "alloc rx skb fail\n"); |
e5597095 PL |
3864 | |
3865 | u64_stats_update_begin(&ring->syncp); | |
3866 | ring->stats.sw_err_cnt++; | |
3867 | u64_stats_update_end(&ring->syncp); | |
3868 | ||
3869 | return -ENOMEM; | |
3870 | } | |
3871 | ||
698a8954 | 3872 | trace_hns3_rx_desc(ring); |
e5597095 PL |
3873 | prefetchw(skb->data); |
3874 | ||
3875 | ring->pending_buf = 1; | |
81ae0e04 PL |
3876 | ring->frag_num = 0; |
3877 | ring->tail_skb = NULL; | |
e5597095 PL |
3878 | if (length <= HNS3_RX_HEAD_SIZE) { |
3879 | memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); | |
3880 | ||
a79afa78 AL |
3881 | /* We can reuse buffer as-is, just make sure it is reusable */ |
3882 | if (dev_page_is_reusable(desc_cb->priv)) | |
e5597095 | 3883 | desc_cb->reuse_flag = 1; |
93188e96 YL |
3884 | else if (desc_cb->type & DESC_TYPE_PP_FRAG) |
3885 | page_pool_put_full_page(ring->page_pool, desc_cb->priv, | |
3886 | false); | |
e5597095 | 3887 | else /* This page cannot be reused so discard it */ |
aeda9bf8 YL |
3888 | __page_frag_cache_drain(desc_cb->priv, |
3889 | desc_cb->pagecnt_bias); | |
e5597095 | 3890 | |
8c30e194 | 3891 | hns3_rx_ring_move_fw(ring); |
e5597095 PL |
3892 | return 0; |
3893 | } | |
93188e96 YL |
3894 | |
3895 | if (ring->page_pool) | |
3896 | skb_mark_for_recycle(skb); | |
3897 | ||
e5597095 PL |
3898 | u64_stats_update_begin(&ring->syncp); |
3899 | ring->stats.seg_pkt_cnt++; | |
3900 | u64_stats_update_end(&ring->syncp); | |
3901 | ||
c43f1255 | 3902 | ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); |
e5597095 | 3903 | __skb_put(skb, ring->pull_len); |
81ae0e04 | 3904 | hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, |
e5597095 | 3905 | desc_cb); |
8c30e194 | 3906 | hns3_rx_ring_move_fw(ring); |
e5597095 | 3907 | |
b2598318 | 3908 | return 0; |
e5597095 PL |
3909 | } |
3910 | ||
b2598318 | 3911 | static int hns3_add_frag(struct hns3_enet_ring *ring) |
e5597095 | 3912 | { |
d35bced8 YL |
3913 | struct sk_buff *skb = ring->skb; |
3914 | struct sk_buff *head_skb = skb; | |
81ae0e04 | 3915 | struct sk_buff *new_skb; |
e5597095 | 3916 | struct hns3_desc_cb *desc_cb; |
b2598318 | 3917 | struct hns3_desc *desc; |
e5597095 | 3918 | u32 bd_base_info; |
e5597095 | 3919 | |
b2598318 | 3920 | do { |
e5597095 PL |
3921 | desc = &ring->desc[ring->next_to_clean]; |
3922 | desc_cb = &ring->desc_cb[ring->next_to_clean]; | |
3923 | bd_base_info = le32_to_cpu(desc->rx.bd_base_info); | |
d394d33b JS |
3924 | /* make sure the HW's descriptor write is complete */ |
3925 | dma_rmb(); | |
e8149933 | 3926 | if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) |
e5597095 PL |
3927 | return -ENXIO; |
3928 | ||
81ae0e04 | 3929 | if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { |
7fda3a93 | 3930 | new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); |
81ae0e04 | 3931 | if (unlikely(!new_skb)) { |
c8711956 | 3932 | hns3_rl_err(ring_to_netdev(ring), |
b20d7fe5 | 3933 | "alloc rx fraglist skb fail\n"); |
81ae0e04 PL |
3934 | return -ENXIO; |
3935 | } | |
93188e96 YL |
3936 | |
3937 | if (ring->page_pool) | |
3938 | skb_mark_for_recycle(new_skb); | |
3939 | ||
81ae0e04 PL |
3940 | ring->frag_num = 0; |
3941 | ||
3942 | if (ring->tail_skb) { | |
3943 | ring->tail_skb->next = new_skb; | |
3944 | ring->tail_skb = new_skb; | |
3945 | } else { | |
3946 | skb_shinfo(skb)->frag_list = new_skb; | |
3947 | ring->tail_skb = new_skb; | |
3948 | } | |
3949 | } | |
3950 | ||
3951 | if (ring->tail_skb) { | |
dbba6da0 | 3952 | head_skb->truesize += hns3_buf_size(ring); |
81ae0e04 PL |
3953 | head_skb->data_len += le16_to_cpu(desc->rx.size); |
3954 | head_skb->len += le16_to_cpu(desc->rx.size); | |
3955 | skb = ring->tail_skb; | |
3956 | } | |
3957 | ||
c2a2e127 BS |
3958 | dma_sync_single_for_cpu(ring_to_dev(ring), |
3959 | desc_cb->dma + desc_cb->page_offset, | |
3960 | hns3_buf_size(ring), | |
3961 | DMA_FROM_DEVICE); | |
3962 | ||
81ae0e04 | 3963 | hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); |
698a8954 | 3964 | trace_hns3_rx_desc(ring); |
8c30e194 | 3965 | hns3_rx_ring_move_fw(ring); |
e5597095 | 3966 | ring->pending_buf++; |
b2598318 | 3967 | } while (!(bd_base_info & BIT(HNS3_RXD_FE_B))); |
e5597095 PL |
3968 | |
3969 | return 0; | |
3970 | } | |
3971 | ||
d474d88f YL |
3972 | static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, |
3973 | struct sk_buff *skb, u32 l234info, | |
1ddc028a | 3974 | u32 bd_base_info, u32 ol_info, u16 csum) |
a6d53b97 | 3975 | { |
79664077 HT |
3976 | struct net_device *netdev = ring_to_netdev(ring); |
3977 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
a6d53b97 PL |
3978 | u32 l3_type; |
3979 | ||
e2ee1c5a YL |
3980 | skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, |
3981 | HNS3_RXD_GRO_SIZE_M, | |
3982 | HNS3_RXD_GRO_SIZE_S); | |
a6d53b97 | 3983 | /* if there is no HW GRO, do not set gro params */ |
e2ee1c5a | 3984 | if (!skb_shinfo(skb)->gso_size) { |
1ddc028a HT |
3985 | hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info, |
3986 | csum); | |
d474d88f YL |
3987 | return 0; |
3988 | } | |
a6d53b97 | 3989 | |
e2ee1c5a YL |
3990 | NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, |
3991 | HNS3_RXD_GRO_COUNT_M, | |
3992 | HNS3_RXD_GRO_COUNT_S); | |
a6d53b97 | 3993 | |
79664077 HT |
3994 | if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { |
3995 | u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, | |
3996 | HNS3_RXD_PTYPE_S); | |
3997 | ||
3998 | l3_type = hns3_rx_ptype_tbl[ptype].l3_type; | |
3999 | } else { | |
4000 | l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, | |
4001 | HNS3_RXD_L3ID_S); | |
4002 | } | |
4003 | ||
a6d53b97 PL |
4004 | if (l3_type == HNS3_L3_TYPE_IPV4) |
4005 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | |
4006 | else if (l3_type == HNS3_L3_TYPE_IPV6) | |
4007 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | |
4008 | else | |
d474d88f | 4009 | return -EFAULT; |
a6d53b97 | 4010 | |
e2ee1c5a | 4011 | return hns3_gro_complete(skb, l234info); |
a6d53b97 PL |
4012 | } |
4013 | ||
232fc64b | 4014 | static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, |
ea485867 | 4015 | struct sk_buff *skb, u32 rss_hash) |
232fc64b | 4016 | { |
232fc64b PL |
4017 | struct hnae3_handle *handle = ring->tqp->handle; |
4018 | enum pkt_hash_types rss_type; | |
4019 | ||
ea485867 | 4020 | if (rss_hash) |
232fc64b PL |
4021 | rss_type = handle->kinfo.rss_type; |
4022 | else | |
4023 | rss_type = PKT_HASH_TYPE_NONE; | |
4024 | ||
ea485867 | 4025 | skb_set_hash(skb, rss_hash, rss_type); |
232fc64b PL |
4026 | } |
4027 | ||
ea485867 | 4028 | static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) |
76ad4f0e | 4029 | { |
c8711956 | 4030 | struct net_device *netdev = ring_to_netdev(ring); |
c376fa1a | 4031 | enum hns3_pkt_l2t_type l2_frame_type; |
39c38824 | 4032 | u32 bd_base_info, l234info, ol_info; |
ea485867 | 4033 | struct hns3_desc *desc; |
d474d88f | 4034 | unsigned int len; |
ea485867 | 4035 | int pre_ntc, ret; |
1ddc028a | 4036 | u16 csum; |
ea485867 YL |
4037 | |
4038 | /* The bdinfo handled below is only valid on the last BD of the |
4039 | * current packet, and ring->next_to_clean indicates the first |
4040 | * descriptor of the next packet, so subtract 1 below. |
4041 | */ | |
4042 | pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : | |
4043 | (ring->desc_num - 1); | |
4044 | desc = &ring->desc[pre_ntc]; | |
4045 | bd_base_info = le32_to_cpu(desc->rx.bd_base_info); | |
4046 | l234info = le32_to_cpu(desc->rx.l234_info); | |
39c38824 | 4047 | ol_info = le32_to_cpu(desc->rx.ol_info); |
1ddc028a | 4048 | csum = le16_to_cpu(desc->csum); |
d474d88f | 4049 | |
0bf5eb78 HT |
4050 | if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { |
4051 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
4052 | u32 nsec = le32_to_cpu(desc->ts_nsec); | |
4053 | u32 sec = le32_to_cpu(desc->ts_sec); | |
4054 | ||
4055 | if (h->ae_algo->ops->get_rx_hwts) | |
4056 | h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); | |
4057 | } | |
4058 | ||
d474d88f YL |
4059 | /* Based on the hardware's strategy, the offloaded tag is stored |
4060 | * in ot_vlan_tag in the two-layer tag case, and in vlan_tag in |
4061 | * the one-layer tag case. |
4062 | */ | |
4063 | if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { | |
4064 | u16 vlan_tag; | |
4065 | ||
4066 | if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) | |
4067 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | |
4068 | vlan_tag); | |
4069 | } | |
4070 | ||
d474d88f YL |
4071 | if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | |
4072 | BIT(HNS3_RXD_L2E_B))))) { | |
4073 | u64_stats_update_begin(&ring->syncp); | |
4074 | if (l234info & BIT(HNS3_RXD_L2E_B)) | |
4075 | ring->stats.l2_err++; | |
4076 | else | |
4077 | ring->stats.err_pkt_len++; | |
4078 | u64_stats_update_end(&ring->syncp); | |
4079 | ||
4080 | return -EFAULT; | |
4081 | } | |
4082 | ||
4083 | len = skb->len; | |
4084 | ||
4085 | /* Prepare the skb for processing by the IP stack */ |
4086 | skb->protocol = eth_type_trans(skb, netdev); | |
4087 | ||
4088 | /* This is needed in order to enable forwarding support */ | |
39c38824 | 4089 | ret = hns3_set_gro_and_checksum(ring, skb, l234info, |
1ddc028a | 4090 | bd_base_info, ol_info, csum); |
d474d88f YL |
4091 | if (unlikely(ret)) { |
4092 | u64_stats_update_begin(&ring->syncp); | |
4093 | ring->stats.rx_err_cnt++; | |
4094 | u64_stats_update_end(&ring->syncp); | |
4095 | return ret; | |
4096 | } | |
4097 | ||
4098 | l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, | |
4099 | HNS3_RXD_DMAC_S); | |
4100 | ||
4101 | u64_stats_update_begin(&ring->syncp); | |
4102 | ring->stats.rx_pkts++; | |
4103 | ring->stats.rx_bytes += len; | |
4104 | ||
4105 | if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) | |
4106 | ring->stats.rx_multicast++; | |
4107 | ||
4108 | u64_stats_update_end(&ring->syncp); | |
4109 | ||
4110 | ring->tqp_vector->rx_group.total_bytes += len; | |
ea485867 YL |
4111 | |
4112 | hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); | |
d474d88f YL |
4113 | return 0; |
4114 | } | |
4115 | ||
d35bced8 | 4116 | static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) |
d474d88f | 4117 | { |
e5597095 | 4118 | struct sk_buff *skb = ring->skb; |
76ad4f0e S |
4119 | struct hns3_desc_cb *desc_cb; |
4120 | struct hns3_desc *desc; | |
b9a8f883 | 4121 | unsigned int length; |
76ad4f0e | 4122 | u32 bd_base_info; |
e5597095 | 4123 | int ret; |
76ad4f0e S |
4124 | |
4125 | desc = &ring->desc[ring->next_to_clean]; | |
4126 | desc_cb = &ring->desc_cb[ring->next_to_clean]; | |
4127 | ||
4128 | prefetch(desc); | |
4129 | ||
8c30e194 YL |
4130 | if (!skb) { |
4131 | bd_base_info = le32_to_cpu(desc->rx.bd_base_info); | |
8c30e194 YL |
4132 | /* Check valid BD */ |
4133 | if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) | |
4134 | return -ENXIO; | |
4135 | ||
4136 | dma_rmb(); | |
4137 | length = le16_to_cpu(desc->rx.size); | |
76ad4f0e | 4138 | |
cb0e3e61 | 4139 | ring->va = desc_cb->buf + desc_cb->page_offset; |
76ad4f0e | 4140 | |
c2a2e127 BS |
4141 | dma_sync_single_for_cpu(ring_to_dev(ring), |
4142 | desc_cb->dma + desc_cb->page_offset, | |
4143 | hns3_buf_size(ring), | |
4144 | DMA_FROM_DEVICE); | |
c2a2e127 | 4145 | |
8c30e194 YL |
4146 | /* Prefetch the first cache line of the first page. |
4147 | * The idea is to cache a few bytes of the packet header. |
4148 | * Our L1 cache line size is 64B, so we need to prefetch twice to |
4149 | * cover 128B. But in practice we can have larger caches with |
4150 | * 128B L1 cache lines; in that case a single fetch would |
4151 | * suffice to cache the relevant part of the header. |
4152 | */ | |
4153 | net_prefetch(ring->va); | |
76ad4f0e | 4154 | |
e5597095 | 4155 | ret = hns3_alloc_skb(ring, length, ring->va); |
d35bced8 | 4156 | skb = ring->skb; |
76ad4f0e | 4157 | |
e5597095 PL |
4158 | if (ret < 0) /* alloc buffer fail */ |
4159 | return ret; | |
b2598318 YL |
4160 | if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */ |
4161 | ret = hns3_add_frag(ring); | |
e5597095 PL |
4162 | if (ret) |
4163 | return ret; | |
e5597095 | 4164 | } |
76ad4f0e | 4165 | } else { |
b2598318 | 4166 | ret = hns3_add_frag(ring); |
e5597095 PL |
4167 | if (ret) |
4168 | return ret; | |
b2598318 | 4169 | } |
76ad4f0e | 4170 | |
b2598318 YL |
4171 | /* As the head data may be changed when GRO is enabled, copy |
4172 | * the head data in after the rest of the packet has been received |
4173 | */ | |
4174 | if (skb->len > HNS3_RX_HEAD_SIZE) | |
e5597095 PL |
4175 | memcpy(skb->data, ring->va, |
4176 | ALIGN(ring->pull_len, sizeof(long))); | |
76ad4f0e | 4177 | |
ea485867 | 4178 | ret = hns3_handle_bdinfo(ring, skb); |
d474d88f | 4179 | if (unlikely(ret)) { |
76ad4f0e | 4180 | dev_kfree_skb_any(skb); |
d474d88f | 4181 | return ret; |
76ad4f0e S |
4182 | } |
4183 | ||
d93ed94f | 4184 | skb_record_rx_queue(skb, ring->tqp->tqp_index); |
76ad4f0e S |
4185 | return 0; |
4186 | } | |
4187 | ||
9b2f3477 WL |
4188 | int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, |
4189 | void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) | |
76ad4f0e S |
4190 | { |
4191 | #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 | |
63380a1a | 4192 | int unused_count = hns3_desc_unused(ring); |
68752b24 | 4193 | bool failure = false; |
a4ee7624 | 4194 | int recv_pkts = 0; |
8c30e194 | 4195 | int err; |
76ad4f0e | 4196 | |
63380a1a | 4197 | unused_count -= ring->pending_buf; |
76ad4f0e | 4198 | |
8c30e194 | 4199 | while (recv_pkts < budget) { |
76ad4f0e | 4200 | /* Reuse or realloc buffers */ |
a4ee7624 | 4201 | if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { |
68752b24 YL |
4202 | failure = failure || |
4203 | hns3_nic_alloc_rx_buffers(ring, unused_count); | |
4204 | unused_count = 0; | |
76ad4f0e S |
4205 | } |
4206 | ||
4207 | /* Poll one pkt */ | |
d35bced8 YL |
4208 | err = hns3_handle_rx_bd(ring); |
4209 | /* No FE (frame end) received for the packet, or skb alloc failed */ |
4210 | if (unlikely(!ring->skb || err == -ENXIO)) { | |
76ad4f0e | 4211 | goto out; |
d35bced8 YL |
4212 | } else if (likely(!err)) { |
4213 | rx_fn(ring, ring->skb); | |
4214 | recv_pkts++; | |
76ad4f0e S |
4215 | } |
4216 | ||
a4ee7624 | 4217 | unused_count += ring->pending_buf; |
e5597095 PL |
4218 | ring->skb = NULL; |
4219 | ring->pending_buf = 0; | |
76ad4f0e S |
4220 | } |
4221 | ||
4222 | out: | |
3b6db4a0 YM |
4223 | /* sync the head pointer before exiting, since hardware calculates |
4224 | * the FBD number from the head pointer |
4225 | */ | |
4226 | if (unused_count > 0) | |
4227 | failure = failure || | |
4228 | hns3_nic_alloc_rx_buffers(ring, unused_count); | |
4229 | ||
68752b24 | 4230 | return failure ? budget : recv_pkts; |
76ad4f0e S |
4231 | } |
4232 | ||
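The return-value convention above (the full budget on allocation failure or when work remains, otherwise the packet count) feeds the standard NAPI contract. A minimal sketch of that contract with hypothetical demo_* helpers:

static int demo_poll(struct napi_struct *napi, int budget)
{
	int done = demo_process_rx(napi, budget);	/* hypothetical */

	/* returning < budget lets NAPI complete and re-arm the IRQ;
	 * returning the full budget keeps the poll scheduled
	 */
	if (done < budget && napi_complete_done(napi, done))
		demo_enable_irq(napi);			/* hypothetical */

	return done;
}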
307ea4ce | 4233 | static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) |
76ad4f0e | 4234 | { |
307ea4ce HT |
4235 | struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; |
4236 | struct dim_sample sample = {}; | |
4a43caf5 | 4237 | |
307ea4ce HT |
4238 | if (!rx_group->coal.adapt_enable) |
4239 | return; | |
76ad4f0e | 4240 | |
307ea4ce HT |
4241 | dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, |
4242 | rx_group->total_bytes, &sample); | |
4243 | net_dim(&rx_group->dim, sample); | |
76ad4f0e S |
4244 | } |
4245 | ||
307ea4ce | 4246 | static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector) |
76ad4f0e | 4247 | { |
8b1ff1ea | 4248 | struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; |
307ea4ce | 4249 | struct dim_sample sample = {}; |
8b1ff1ea | 4250 | |
307ea4ce | 4251 | if (!tx_group->coal.adapt_enable) |
cd9d187b | 4252 | return; |
cd9d187b | 4253 | |
307ea4ce HT |
4254 | dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, |
4255 | tx_group->total_bytes, &sample); | |
4256 | net_dim(&tx_group->dim, sample); | |
76ad4f0e S |
4257 | } |
4258 | ||
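The two helpers above only feed samples into net_dim(); the algorithm completes asynchronously by scheduling dim->work when it has a new profile suggestion. A sketch of the work-handler side of that loop, with a hypothetical demo_apply_coalesce():

static void demo_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	/* program the suggested interrupt moderation into hardware */
	demo_apply_coalesce(dim, moder.usec, moder.pkts);	/* hypothetical */

	/* re-arm the algorithm for the next measurement window */
	dim->state = DIM_START_MEASURE;
}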
4259 | static int hns3_nic_common_poll(struct napi_struct *napi, int budget) | |
4260 | { | |
ff0699e0 | 4261 | struct hns3_nic_priv *priv = netdev_priv(napi->dev); |
76ad4f0e S |
4262 | struct hns3_enet_ring *ring; |
4263 | int rx_pkt_total = 0; | |
4264 | ||
4265 | struct hns3_enet_tqp_vector *tqp_vector = | |
4266 | container_of(napi, struct hns3_enet_tqp_vector, napi); | |
4267 | bool clean_complete = true; | |
ceca4a5e | 4268 | int rx_budget = budget; |
76ad4f0e | 4269 | |
ff0699e0 HT |
4270 | if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { |
4271 | napi_complete(napi); | |
4272 | return 0; | |
4273 | } | |
4274 | ||
76ad4f0e S |
4275 | /* Since the actual Tx work is minimal, we can give the Tx a larger |
4276 | * budget and be more aggressive about cleaning up the Tx descriptors. | |
4277 | */ | |
799997a3 | 4278 | hns3_for_each_ring(ring, tqp_vector->tx_group) |
619ae331 | 4279 | hns3_clean_tx_ring(ring, budget); |
76ad4f0e S |
4280 | |
4281 | /* make sure the rx ring budget is not smaller than 1 */ |
ceca4a5e YL |
4282 | if (tqp_vector->num_tqps > 1) |
4283 | rx_budget = max(budget / tqp_vector->num_tqps, 1); | |
76ad4f0e S |
4284 | |
4285 | hns3_for_each_ring(ring, tqp_vector->rx_group) { | |
d43e5aca YL |
4286 | int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, |
4287 | hns3_rx_skb); | |
76ad4f0e S |
4288 | if (rx_cleaned >= rx_budget) |
4289 | clean_complete = false; | |
4290 | ||
4291 | rx_pkt_total += rx_cleaned; | |
4292 | } | |
4293 | ||
4294 | tqp_vector->rx_group.total_packets += rx_pkt_total; | |
4295 | ||
4296 | if (!clean_complete) | |
4297 | return budget; | |
4298 | ||
531eba0f HT |
4299 | if (napi_complete(napi) && |
4300 | likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { | |
307ea4ce HT |
4301 | hns3_update_rx_int_coalesce(tqp_vector); |
4302 | hns3_update_tx_int_coalesce(tqp_vector); | |
4303 | ||
ff0699e0 HT |
4304 | hns3_mask_vector_irq(tqp_vector, 1); |
4305 | } | |
76ad4f0e S |
4306 | |
4307 | return rx_pkt_total; | |
4308 | } | |
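
The poll routine above splits the NAPI budget evenly across the RX rings served by one vector, clamping the per-ring share to at least 1. A minimal user-space sketch of that arithmetic (the values are hypothetical, not driver defaults):

#include <stdio.h>

/* Mirror of the rx_budget computation in hns3_nic_common_poll(). */
static int rx_budget_per_ring(int budget, int num_tqps)
{
	int rx_budget = budget;

	/* make sure the per-ring rx budget is not smaller than 1 */
	if (num_tqps > 1)
		rx_budget = budget / num_tqps > 1 ? budget / num_tqps : 1;

	return rx_budget;
}

int main(void)
{
	/* e.g. a NAPI budget of 64 shared by 3 queues -> 21 per ring */
	printf("%d\n", rx_budget_per_ring(64, 3));
	return 0;
}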
4309 | ||
4310 | static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, | |
4311 | struct hnae3_ring_chain_node *head) | |
4312 | { | |
4313 | struct pci_dev *pdev = tqp_vector->handle->pdev; | |
4314 | struct hnae3_ring_chain_node *cur_chain = head; | |
4315 | struct hnae3_ring_chain_node *chain; | |
4316 | struct hns3_enet_ring *tx_ring; | |
4317 | struct hns3_enet_ring *rx_ring; | |
4318 | ||
4319 | tx_ring = tqp_vector->tx_group.ring; | |
4320 | if (tx_ring) { | |
4321 | cur_chain->tqp_index = tx_ring->tqp->tqp_index; | |
e4e87715 PL |
4322 | hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, |
4323 | HNAE3_RING_TYPE_TX); | |
4324 | hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, | |
4325 | HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); | |
76ad4f0e S |
4326 | |
4327 | cur_chain->next = NULL; | |
4328 | ||
4329 | while (tx_ring->next) { | |
4330 | tx_ring = tx_ring->next; | |
4331 | ||
4332 | chain = devm_kzalloc(&pdev->dev, sizeof(*chain), | |
4333 | GFP_KERNEL); | |
4334 | if (!chain) | |
73b907a0 | 4335 | goto err_free_chain; |
76ad4f0e S |
4336 | |
4337 | cur_chain->next = chain; | |
4338 | chain->tqp_index = tx_ring->tqp->tqp_index; | |
e4e87715 PL |
4339 | hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, |
4340 | HNAE3_RING_TYPE_TX); | |
4341 | hnae3_set_field(chain->int_gl_idx, | |
4342 | HNAE3_RING_GL_IDX_M, | |
4343 | HNAE3_RING_GL_IDX_S, | |
4344 | HNAE3_RING_GL_TX); | |
76ad4f0e S |
4345 | |
4346 | cur_chain = chain; | |
4347 | } | |
4348 | } | |
4349 | ||
4350 | rx_ring = tqp_vector->rx_group.ring; | |
4351 | if (!tx_ring && rx_ring) { | |
4352 | cur_chain->next = NULL; | |
4353 | cur_chain->tqp_index = rx_ring->tqp->tqp_index; | |
e4e87715 PL |
4354 | hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, |
4355 | HNAE3_RING_TYPE_RX); | |
4356 | hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, | |
4357 | HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); | |
76ad4f0e S |
4358 | |
4359 | rx_ring = rx_ring->next; | |
4360 | } | |
4361 | ||
4362 | while (rx_ring) { | |
4363 | chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); | |
4364 | if (!chain) | |
73b907a0 | 4365 | goto err_free_chain; |
76ad4f0e S |
4366 | |
4367 | cur_chain->next = chain; | |
4368 | chain->tqp_index = rx_ring->tqp->tqp_index; | |
e4e87715 PL |
4369 | hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, |
4370 | HNAE3_RING_TYPE_RX); | |
4371 | hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, | |
4372 | HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); | |
11af96a4 | 4373 | |
76ad4f0e S |
4374 | cur_chain = chain; |
4375 | ||
4376 | rx_ring = rx_ring->next; | |
4377 | } | |
4378 | ||
4379 | return 0; | |
73b907a0 HT |
4380 | |
4381 | err_free_chain: | |
4382 | cur_chain = head->next; | |
4383 | while (cur_chain) { | |
4384 | chain = cur_chain->next; | |
cda69d24 | 4385 | devm_kfree(&pdev->dev, cur_chain); |
73b907a0 HT |
4386 | cur_chain = chain; |
4387 | } | |
cda69d24 | 4388 | head->next = NULL; |
73b907a0 HT |
4389 | |
4390 | return -ENOMEM; | |
76ad4f0e S |
4391 | } |
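
hns3_get_vector_ring_chain() builds a singly linked chain node by node and, on allocation failure, walks the partial chain to free it before returning. A standalone sketch of that goto-unwind pattern, with calloc()/free() standing in for devm_kzalloc()/devm_kfree() (build_chain and its types are hypothetical names):

#include <stdlib.h>

struct chain_node {
	struct chain_node *next;
	int tqp_index;
};

/* Append n nodes after *head; on failure, free the partial chain and
 * reset head->next, mirroring the err_free_chain label above.
 */
static int build_chain(struct chain_node *head, int n)
{
	struct chain_node *cur = head, *node;
	int i;

	for (i = 0; i < n; i++) {
		node = calloc(1, sizeof(*node));
		if (!node)
			goto err_free_chain;
		node->tqp_index = i;
		cur->next = node;
		cur = node;
	}
	return 0;

err_free_chain:
	cur = head->next;
	while (cur) {
		node = cur->next;
		free(cur);
		cur = node;
	}
	head->next = NULL;
	return -1;
}

int main(void)
{
	struct chain_node head = { 0 };

	return build_chain(&head, 4);	/* chain leaked on success: demo only */
}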
4392 | ||
4393 | static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, | |
4394 | struct hnae3_ring_chain_node *head) | |
4395 | { | |
4396 | struct pci_dev *pdev = tqp_vector->handle->pdev; | |
4397 | struct hnae3_ring_chain_node *chain_tmp, *chain; | |
4398 | ||
4399 | chain = head->next; | |
4400 | ||
4401 | while (chain) { | |
4402 | chain_tmp = chain->next; | |
4403 | devm_kfree(&pdev->dev, chain); | |
4404 | chain = chain_tmp; | |
4405 | } | |
4406 | } | |
4407 | ||
4408 | static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, | |
4409 | struct hns3_enet_ring *ring) | |
4410 | { | |
4411 | ring->next = group->ring; | |
4412 | group->ring = ring; | |
4413 | ||
4414 | group->count++; | |
4415 | } | |
4416 | ||
874bff0b PL |
4417 | static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv) |
4418 | { | |
4419 | struct pci_dev *pdev = priv->ae_handle->pdev; | |
4420 | struct hns3_enet_tqp_vector *tqp_vector; | |
4421 | int num_vectors = priv->vector_num; | |
4422 | int numa_node; | |
4423 | int vector_i; | |
4424 | ||
4425 | numa_node = dev_to_node(&pdev->dev); | |
4426 | ||
4427 | for (vector_i = 0; vector_i < num_vectors; vector_i++) { | |
4428 | tqp_vector = &priv->tqp_vector[vector_i]; | |
4429 | cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node), | |
4430 | &tqp_vector->affinity_mask); | |
4431 | } | |
4432 | } | |
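
cpumask_local_spread() picks, for the i-th vector, a CPU close to the device's NUMA node, so interrupt affinity follows device locality. A rough user-space model of the node-local round-robin (the real helper also falls back to CPUs on other nodes once the local ones are exhausted; the CPU list below is hypothetical):

#include <stdio.h>

static int local_spread(int i, const int *node_cpus, int n_node_cpus)
{
	return node_cpus[i % n_node_cpus];
}

int main(void)
{
	const int node_cpus[] = { 4, 5, 6, 7 };	/* CPUs on the NIC's node */
	int v;

	for (v = 0; v < 6; v++)	/* 6 vectors -> cpus 4 5 6 7 4 5 */
		printf("vector %d -> cpu %d\n", v,
		       local_spread(v, node_cpus, 4));
	return 0;
}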
4433 | ||
307ea4ce HT |
4434 | static void hns3_rx_dim_work(struct work_struct *work) |
4435 | { | |
4436 | struct dim *dim = container_of(work, struct dim, work); | |
4437 | struct hns3_enet_ring_group *group = container_of(dim, | |
4438 | struct hns3_enet_ring_group, dim); | |
4439 | struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; | |
4440 | struct dim_cq_moder cur_moder = | |
4441 | net_dim_get_rx_moderation(dim->mode, dim->profile_ix); | |
4442 | ||
4443 | hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); | |
4444 | tqp_vector->rx_group.coal.int_gl = cur_moder.usec; | |
4445 | ||
4446 | if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { | |
4447 | hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts); | |
4448 | tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; | |
4449 | } | |
4450 | ||
4451 | dim->state = DIM_START_MEASURE; | |
4452 | } | |
4453 | ||
4454 | static void hns3_tx_dim_work(struct work_struct *work) | |
4455 | { | |
4456 | struct dim *dim = container_of(work, struct dim, work); | |
4457 | struct hns3_enet_ring_group *group = container_of(dim, | |
4458 | struct hns3_enet_ring_group, dim); | |
4459 | struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; | |
4460 | struct dim_cq_moder cur_moder = | |
4461 | net_dim_get_tx_moderation(dim->mode, dim->profile_ix); | |
4462 | ||
4463 | hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec); | |
4464 | tqp_vector->tx_group.coal.int_gl = cur_moder.usec; | |
4465 | ||
4466 | if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { | |
4467 | hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts); | |
4468 | tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; | |
4469 | } | |
4470 | ||
4471 | dim->state = DIM_START_MEASURE; | |
4472 | } | |
4473 | ||
4474 | static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) | |
4475 | { | |
4476 | INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); | |
307ea4ce | 4477 | INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); |
307ea4ce HT |
4478 | } |
4479 | ||
76ad4f0e S |
4480 | static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) |
4481 | { | |
76ad4f0e S |
4482 | struct hnae3_handle *h = priv->ae_handle; |
4483 | struct hns3_enet_tqp_vector *tqp_vector; | |
9d8d5a36 | 4484 | int ret; |
ece4bf46 | 4485 | int i; |
76ad4f0e | 4486 | |
874bff0b PL |
4487 | hns3_nic_set_cpumask(priv); |
4488 | ||
dd38c726 YL |
4489 | for (i = 0; i < priv->vector_num; i++) { |
4490 | tqp_vector = &priv->tqp_vector[i]; | |
91bfae25 | 4491 | hns3_vector_coalesce_init_hw(tqp_vector, priv); |
dd38c726 | 4492 | tqp_vector->num_tqps = 0; |
307ea4ce | 4493 | hns3_nic_init_dim(tqp_vector); |
dd38c726 | 4494 | } |
76ad4f0e | 4495 | |
dd38c726 YL |
4496 | for (i = 0; i < h->kinfo.num_tqps; i++) { |
4497 | u16 vector_i = i % priv->vector_num; | |
4498 | u16 tqp_num = h->kinfo.num_tqps; | |
76ad4f0e S |
4499 | |
4500 | tqp_vector = &priv->tqp_vector[vector_i]; | |
4501 | ||
4502 | hns3_add_ring_to_group(&tqp_vector->tx_group, | |
5f06b903 | 4503 | &priv->ring[i]); |
76ad4f0e S |
4504 | |
4505 | hns3_add_ring_to_group(&tqp_vector->rx_group, | |
5f06b903 | 4506 | &priv->ring[i + tqp_num]); |
76ad4f0e | 4507 | |
5f06b903 YL |
4508 | priv->ring[i].tqp_vector = tqp_vector; |
4509 | priv->ring[i + tqp_num].tqp_vector = tqp_vector; | |
dd38c726 | 4510 | tqp_vector->num_tqps++; |
76ad4f0e S |
4511 | } |
4512 | ||
dd38c726 | 4513 | for (i = 0; i < priv->vector_num; i++) { |
d392ecd1 SM |
4514 | struct hnae3_ring_chain_node vector_ring_chain; |
4515 | ||
76ad4f0e S |
4516 | tqp_vector = &priv->tqp_vector[i]; |
4517 | ||
4518 | tqp_vector->rx_group.total_bytes = 0; | |
4519 | tqp_vector->rx_group.total_packets = 0; | |
4520 | tqp_vector->tx_group.total_bytes = 0; | |
4521 | tqp_vector->tx_group.total_packets = 0; | |
76ad4f0e S |
4522 | tqp_vector->handle = h; |
4523 | ||
4524 | ret = hns3_get_vector_ring_chain(tqp_vector, | |
4525 | &vector_ring_chain); | |
4526 | if (ret) | |
cda69d24 | 4527 | goto map_ring_fail; |
76ad4f0e S |
4528 | |
4529 | ret = h->ae_algo->ops->map_ring_to_vector(h, | |
4530 | tqp_vector->vector_irq, &vector_ring_chain); | |
76ad4f0e S |
4531 | |
4532 | hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); | |
4533 | ||
dd38c726 | 4534 | if (ret) |
ece4bf46 | 4535 | goto map_ring_fail; |
dd38c726 | 4536 | |
76ad4f0e S |
4537 | netif_napi_add(priv->netdev, &tqp_vector->napi, |
4538 | hns3_nic_common_poll, NAPI_POLL_WEIGHT); | |
4539 | } | |
4540 | ||
dd38c726 | 4541 | return 0; |
ece4bf46 HT |
4542 | |
4543 | map_ring_fail: | |
4544 | while (i--) | |
4545 | netif_napi_del(&priv->tqp_vector[i].napi); | |
4546 | ||
4547 | return ret; | |
dd38c726 YL |
4548 | } |
4549 | ||
73a13d8d HT |
4550 | static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv) |
4551 | { | |
4552 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); | |
4553 | struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; | |
4554 | struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; | |
4555 | ||
4556 | /* initialize the configuration for interrupt coalescing. | |
4557 | * 1. GL (Interrupt Gap Limiter) | |
4558 | * 2. RL (Interrupt Rate Limiter) | |
4559 | * 3. QL (Interrupt Quantity Limiter) | |
4560 | * | |
4561 | 	 * Default: enable self-adaptive interrupt coalescing and GL |
4562 | */ | |
4563 | tx_coal->adapt_enable = 1; | |
4564 | rx_coal->adapt_enable = 1; | |
4565 | ||
4566 | tx_coal->int_gl = HNS3_INT_GL_50K; | |
4567 | rx_coal->int_gl = HNS3_INT_GL_50K; | |
4568 | ||
4569 | rx_coal->flow_level = HNS3_FLOW_LOW; | |
4570 | tx_coal->flow_level = HNS3_FLOW_LOW; | |
4571 | ||
4572 | if (ae_dev->dev_specs.int_ql_max) { | |
4573 | tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; | |
4574 | rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; | |
4575 | } | |
4576 | } | |
4577 | ||
dd38c726 YL |
4578 | static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) |
4579 | { | |
4580 | struct hnae3_handle *h = priv->ae_handle; | |
4581 | struct hns3_enet_tqp_vector *tqp_vector; | |
4582 | struct hnae3_vector_info *vector; | |
4583 | struct pci_dev *pdev = h->pdev; | |
4584 | u16 tqp_num = h->kinfo.num_tqps; | |
4585 | u16 vector_num; | |
4586 | int ret = 0; | |
4587 | u16 i; | |
4588 | ||
4589 | 	/* RSS size, number of online cpus and vector_num should be the same */ |
4590 | /* Should consider 2p/4p later */ | |
4591 | vector_num = min_t(u16, num_online_cpus(), tqp_num); | |
75edb610 | 4592 | |
dd38c726 YL |
4593 | vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), |
4594 | GFP_KERNEL); | |
4595 | if (!vector) | |
4596 | return -ENOMEM; | |
4597 | ||
9b2f3477 | 4598 | /* save the actual available vector number */ |
dd38c726 YL |
4599 | vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); |
4600 | ||
4601 | priv->vector_num = vector_num; | |
4602 | priv->tqp_vector = (struct hns3_enet_tqp_vector *) | |
4603 | devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), | |
4604 | GFP_KERNEL); | |
4605 | if (!priv->tqp_vector) { | |
4606 | ret = -ENOMEM; | |
4607 | goto out; | |
4608 | } | |
4609 | ||
4610 | for (i = 0; i < priv->vector_num; i++) { | |
4611 | tqp_vector = &priv->tqp_vector[i]; | |
4612 | tqp_vector->idx = i; | |
4613 | tqp_vector->mask_addr = vector[i].io_addr; | |
4614 | tqp_vector->vector_irq = vector[i].vector; | |
91bfae25 | 4615 | hns3_vector_coalesce_init(tqp_vector, priv); |
dd38c726 YL |
4616 | } |
4617 | ||
76ad4f0e S |
4618 | out: |
4619 | devm_kfree(&pdev->dev, vector); | |
4620 | return ret; | |
4621 | } | |
4622 | ||
dd38c726 YL |
4623 | static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) |
4624 | { | |
4625 | group->ring = NULL; | |
4626 | group->count = 0; | |
4627 | } | |
4628 | ||
e2152785 | 4629 | static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) |
76ad4f0e S |
4630 | { |
4631 | struct hnae3_ring_chain_node vector_ring_chain; | |
4632 | struct hnae3_handle *h = priv->ae_handle; | |
4633 | struct hns3_enet_tqp_vector *tqp_vector; | |
e2152785 | 4634 | int i; |
76ad4f0e S |
4635 | |
4636 | for (i = 0; i < priv->vector_num; i++) { | |
4637 | tqp_vector = &priv->tqp_vector[i]; | |
4638 | ||
2c9dd668 HT |
4639 | if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) |
4640 | continue; | |
4641 | ||
ff7dfcdd HT |
4642 | 		/* Since the mapping can be overwritten, if we fail to get the |
4643 | 		 * chain between vector and ring, we should go on to deal with |
4644 | 		 * the remaining vectors. |
4645 | */ | |
4646 | if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) | |
4647 | dev_warn(priv->dev, "failed to get ring chain\n"); | |
76ad4f0e | 4648 | |
e2152785 | 4649 | h->ae_algo->ops->unmap_ring_from_vector(h, |
76ad4f0e | 4650 | tqp_vector->vector_irq, &vector_ring_chain); |
76ad4f0e S |
4651 | |
4652 | hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); | |
4653 | ||
dd38c726 YL |
4654 | hns3_clear_ring_group(&tqp_vector->rx_group); |
4655 | hns3_clear_ring_group(&tqp_vector->tx_group); | |
76ad4f0e S |
4656 | netif_napi_del(&priv->tqp_vector[i].napi); |
4657 | } | |
dd38c726 YL |
4658 | } |
4659 | ||
08a10068 | 4660 | static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) |
dd38c726 YL |
4661 | { |
4662 | struct hnae3_handle *h = priv->ae_handle; | |
4663 | struct pci_dev *pdev = h->pdev; | |
4664 | int i, ret; | |
4665 | ||
4666 | for (i = 0; i < priv->vector_num; i++) { | |
4667 | struct hns3_enet_tqp_vector *tqp_vector; | |
4668 | ||
4669 | tqp_vector = &priv->tqp_vector[i]; | |
4670 | ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); | |
4671 | if (ret) | |
08a10068 | 4672 | return; |
dd38c726 | 4673 | } |
76ad4f0e | 4674 | |
dd38c726 | 4675 | devm_kfree(&pdev->dev, priv->tqp_vector); |
76ad4f0e S |
4676 | } |
4677 | ||
5f06b903 YL |
4678 | static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, |
4679 | unsigned int ring_type) | |
76ad4f0e | 4680 | { |
76ad4f0e | 4681 | int queue_num = priv->ae_handle->kinfo.num_tqps; |
76ad4f0e | 4682 | struct hns3_enet_ring *ring; |
c0425944 | 4683 | int desc_num; |
76ad4f0e | 4684 | |
76ad4f0e | 4685 | if (ring_type == HNAE3_RING_TYPE_TX) { |
5f06b903 | 4686 | ring = &priv->ring[q->tqp_index]; |
c0425944 | 4687 | desc_num = priv->ae_handle->kinfo.num_tx_desc; |
5f06b903 | 4688 | ring->queue_index = q->tqp_index; |
907676b1 YL |
4689 | ring->tx_copybreak = priv->tx_copybreak; |
4690 | ring->last_to_use = 0; | |
76ad4f0e | 4691 | } else { |
5f06b903 | 4692 | ring = &priv->ring[q->tqp_index + queue_num]; |
c0425944 | 4693 | desc_num = priv->ae_handle->kinfo.num_rx_desc; |
5f06b903 | 4694 | ring->queue_index = q->tqp_index; |
99f6b5fb | 4695 | ring->rx_copybreak = priv->rx_copybreak; |
76ad4f0e S |
4696 | } |
4697 | ||
e4e87715 | 4698 | hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); |
76ad4f0e | 4699 | |
76ad4f0e S |
4700 | ring->tqp = q; |
4701 | ring->desc = NULL; | |
4702 | ring->desc_cb = NULL; | |
4703 | ring->dev = priv->dev; | |
4704 | ring->desc_dma_addr = 0; | |
4705 | ring->buf_size = q->buf_size; | |
2c9dd668 | 4706 | ring->desc_num = desc_num; |
76ad4f0e S |
4707 | ring->next_to_use = 0; |
4708 | ring->next_to_clean = 0; | |
76ad4f0e S |
4709 | } |
4710 | ||
5f06b903 YL |
4711 | static void hns3_queue_to_ring(struct hnae3_queue *tqp, |
4712 | struct hns3_nic_priv *priv) | |
76ad4f0e | 4713 | { |
5f06b903 YL |
4714 | hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); |
4715 | hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); | |
76ad4f0e S |
4716 | } |
4717 | ||
4718 | static int hns3_get_ring_config(struct hns3_nic_priv *priv) | |
4719 | { | |
4720 | struct hnae3_handle *h = priv->ae_handle; | |
4721 | struct pci_dev *pdev = h->pdev; | |
5f06b903 | 4722 | int i; |
76ad4f0e | 4723 | |
5f06b903 YL |
4724 | priv->ring = devm_kzalloc(&pdev->dev, |
4725 | array3_size(h->kinfo.num_tqps, | |
4726 | sizeof(*priv->ring), 2), | |
4727 | GFP_KERNEL); | |
4728 | if (!priv->ring) | |
76ad4f0e S |
4729 | return -ENOMEM; |
4730 | ||
5f06b903 YL |
4731 | for (i = 0; i < h->kinfo.num_tqps; i++) |
4732 | hns3_queue_to_ring(h->kinfo.tqp[i], priv); | |
76ad4f0e S |
4733 | |
4734 | return 0; | |
76ad4f0e S |
4735 | } |
4736 | ||
09f2af64 PL |
4737 | static void hns3_put_ring_config(struct hns3_nic_priv *priv) |
4738 | { | |
5f06b903 | 4739 | if (!priv->ring) |
7b8f622e HT |
4740 | return; |
4741 | ||
5f06b903 YL |
4742 | devm_kfree(priv->dev, priv->ring); |
4743 | priv->ring = NULL; | |
09f2af64 PL |
4744 | } |
4745 | ||
93188e96 YL |
4746 | static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) |
4747 | { | |
4748 | struct page_pool_params pp_params = { | |
4749 | .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | | |
4750 | PP_FLAG_DMA_SYNC_DEV, | |
4751 | .order = hns3_page_order(ring), | |
4752 | .pool_size = ring->desc_num * hns3_buf_size(ring) / | |
4753 | (PAGE_SIZE << hns3_page_order(ring)), | |
4754 | .nid = dev_to_node(ring_to_dev(ring)), | |
4755 | .dev = ring_to_dev(ring), | |
4756 | .dma_dir = DMA_FROM_DEVICE, | |
4757 | .offset = 0, | |
4758 | .max_len = PAGE_SIZE << hns3_page_order(ring), | |
4759 | }; | |
4760 | ||
4761 | ring->page_pool = page_pool_create(&pp_params); | |
4762 | if (IS_ERR(ring->page_pool)) { | |
4763 | dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n", | |
4764 | PTR_ERR(ring->page_pool)); | |
4765 | ring->page_pool = NULL; | |
4766 | } | |
4767 | } | |
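
The pool_size above is sized so the page pool can back every RX descriptor: desc_num buffers of hns3_buf_size() each, divided by the bytes provided per pool page of the configured order. The same arithmetic in isolation (4 KB pages, order 0, 2 KB buffers and 1024 descriptors are hypothetical example values):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, order = 0;
	unsigned int desc_num = 1024, buf_size = 2048;
	unsigned int pool_size = desc_num * buf_size / (page_size << order);

	printf("pool_size = %u pages\n", pool_size);	/* -> 512 */
	return 0;
}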
4768 | ||
76ad4f0e S |
4769 | static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) |
4770 | { | |
4771 | int ret; | |
4772 | ||
4773 | if (ring->desc_num <= 0 || ring->buf_size <= 0) | |
4774 | return -EINVAL; | |
4775 | ||
77296bf6 YL |
4776 | ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, |
4777 | sizeof(ring->desc_cb[0]), GFP_KERNEL); | |
76ad4f0e S |
4778 | if (!ring->desc_cb) { |
4779 | ret = -ENOMEM; | |
4780 | goto out; | |
4781 | } | |
4782 | ||
4783 | ret = hns3_alloc_desc(ring); | |
4784 | if (ret) | |
4785 | goto out_with_desc_cb; | |
4786 | ||
4787 | if (!HNAE3_IS_TX_RING(ring)) { | |
f7ec554b YL |
4788 | if (page_pool_enabled) |
4789 | hns3_alloc_page_pool(ring); | |
93188e96 | 4790 | |
76ad4f0e S |
4791 | ret = hns3_alloc_ring_buffers(ring); |
4792 | if (ret) | |
4793 | goto out_with_desc; | |
907676b1 YL |
4794 | } else { |
4795 | hns3_init_tx_spare_buffer(ring); | |
76ad4f0e S |
4796 | } |
4797 | ||
4798 | return 0; | |
4799 | ||
4800 | out_with_desc: | |
4801 | hns3_free_desc(ring); | |
4802 | out_with_desc_cb: | |
77296bf6 | 4803 | devm_kfree(ring_to_dev(ring), ring->desc_cb); |
76ad4f0e S |
4804 | ring->desc_cb = NULL; |
4805 | out: | |
4806 | return ret; | |
4807 | } | |
4808 | ||
a723fb8e | 4809 | void hns3_fini_ring(struct hns3_enet_ring *ring) |
76ad4f0e S |
4810 | { |
4811 | hns3_free_desc(ring); | |
77296bf6 | 4812 | devm_kfree(ring_to_dev(ring), ring->desc_cb); |
76ad4f0e S |
4813 | ring->desc_cb = NULL; |
4814 | ring->next_to_clean = 0; | |
4815 | ring->next_to_use = 0; | |
20d06ca2 | 4816 | ring->last_to_use = 0; |
ac574b80 | 4817 | ring->pending_buf = 0; |
907676b1 | 4818 | if (!HNAE3_IS_TX_RING(ring) && ring->skb) { |
ac574b80 PL |
4819 | dev_kfree_skb_any(ring->skb); |
4820 | ring->skb = NULL; | |
907676b1 YL |
4821 | } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { |
4822 | struct hns3_tx_spare *tx_spare = ring->tx_spare; | |
4823 | ||
4824 | dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, | |
4825 | DMA_TO_DEVICE); | |
4826 | free_pages((unsigned long)tx_spare->buf, | |
4827 | get_order(tx_spare->len)); | |
4828 | devm_kfree(ring_to_dev(ring), tx_spare); | |
4829 | ring->tx_spare = NULL; | |
ac574b80 | 4830 | } |
93188e96 YL |
4831 | |
4832 | if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { | |
4833 | page_pool_destroy(ring->page_pool); | |
4834 | ring->page_pool = NULL; | |
4835 | } | |
76ad4f0e S |
4836 | } |
4837 | ||
1db9b1bf | 4838 | static int hns3_buf_size2type(u32 buf_size) |
76ad4f0e S |
4839 | { |
4840 | int bd_size_type; | |
4841 | ||
4842 | switch (buf_size) { | |
4843 | case 512: | |
4844 | bd_size_type = HNS3_BD_SIZE_512_TYPE; | |
4845 | break; | |
4846 | case 1024: | |
4847 | bd_size_type = HNS3_BD_SIZE_1024_TYPE; | |
4848 | break; | |
4849 | case 2048: | |
4850 | bd_size_type = HNS3_BD_SIZE_2048_TYPE; | |
4851 | break; | |
4852 | case 4096: | |
4853 | bd_size_type = HNS3_BD_SIZE_4096_TYPE; | |
4854 | break; | |
4855 | default: | |
4856 | bd_size_type = HNS3_BD_SIZE_2048_TYPE; | |
4857 | } | |
4858 | ||
4859 | return bd_size_type; | |
4860 | } | |
4861 | ||
4862 | static void hns3_init_ring_hw(struct hns3_enet_ring *ring) | |
4863 | { | |
4864 | dma_addr_t dma = ring->desc_dma_addr; | |
4865 | struct hnae3_queue *q = ring->tqp; | |
4866 | ||
4867 | if (!HNAE3_IS_TX_RING(ring)) { | |
9b2f3477 | 4868 | hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma); |
76ad4f0e S |
4869 | hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, |
4870 | (u32)((dma >> 31) >> 1)); | |
4871 | ||
4872 | hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, | |
4873 | hns3_buf_size2type(ring->buf_size)); | |
4874 | hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, | |
4875 | ring->desc_num / 8 - 1); | |
76ad4f0e S |
4876 | } else { |
4877 | hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, | |
4878 | (u32)dma); | |
4879 | hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, | |
4880 | (u32)((dma >> 31) >> 1)); | |
4881 | ||
76ad4f0e S |
4882 | hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, |
4883 | ring->desc_num / 8 - 1); | |
4884 | } | |
4885 | } | |
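
hns3_init_ring_hw() programs the 64-bit descriptor base address into two 32-bit registers. The (dma >> 31) >> 1 idiom keeps each shift smaller than the operand width, likely so the expression stays well defined even when dma_addr_t is a 32-bit type (a direct >> 32 would then be undefined behavior). A standalone sketch of the split (the address is hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x123456789000ULL;	/* example ring base address */
	uint32_t lo = (uint32_t)dma;
	uint32_t hi = (uint32_t)((dma >> 31) >> 1);

	/* prints lo=0x56789000 hi=0x1234 */
	printf("lo=0x%" PRIx32 " hi=0x%" PRIx32 "\n", lo, hi);
	return 0;
}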
4886 | ||
1c772154 YL |
4887 | static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) |
4888 | { | |
4889 | struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; | |
35244430 | 4890 | struct hnae3_tc_info *tc_info = &kinfo->tc_info; |
1c772154 YL |
4891 | int i; |
4892 | ||
a8e76fef | 4893 | for (i = 0; i < tc_info->num_tc; i++) { |
1c772154 YL |
4894 | int j; |
4895 | ||
35244430 | 4896 | for (j = 0; j < tc_info->tqp_count[i]; j++) { |
1c772154 YL |
4897 | struct hnae3_queue *q; |
4898 | ||
35244430 JS |
4899 | q = priv->ring[tc_info->tqp_offset[i] + j].tqp; |
4900 | hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); | |
1c772154 YL |
4901 | } |
4902 | } | |
4903 | } | |
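
hns3_init_tx_ring_tc() tags each TX queue register with its traffic class: queue indexes in [tqp_offset[i], tqp_offset[i] + tqp_count[i]) belong to TC i. A user-space sketch of that mapping (the offsets and counts are hypothetical):

#include <stdio.h>

int main(void)
{
	int tqp_offset[] = { 0, 4 };
	int tqp_count[] = { 4, 4 };
	int num_tc = 2, i, j;

	for (i = 0; i < num_tc; i++)
		for (j = 0; j < tqp_count[i]; j++)
			printf("queue %d -> TC %d\n", tqp_offset[i] + j, i);
	return 0;
}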
4904 | ||
5668abda | 4905 | int hns3_init_all_ring(struct hns3_nic_priv *priv) |
76ad4f0e S |
4906 | { |
4907 | struct hnae3_handle *h = priv->ae_handle; | |
4908 | int ring_num = h->kinfo.num_tqps * 2; | |
4909 | int i, j; | |
4910 | int ret; | |
4911 | ||
4912 | for (i = 0; i < ring_num; i++) { | |
5f06b903 | 4913 | ret = hns3_alloc_ring_memory(&priv->ring[i]); |
76ad4f0e S |
4914 | if (ret) { |
4915 | dev_err(priv->dev, | |
4916 | "Alloc ring memory fail! ret=%d\n", ret); | |
4917 | goto out_when_alloc_ring_memory; | |
4918 | } | |
4919 | ||
5f06b903 | 4920 | u64_stats_init(&priv->ring[i].syncp); |
76ad4f0e S |
4921 | } |
4922 | ||
4923 | return 0; | |
4924 | ||
4925 | out_when_alloc_ring_memory: | |
4926 | for (j = i - 1; j >= 0; j--) | |
5f06b903 | 4927 | hns3_fini_ring(&priv->ring[j]); |
76ad4f0e S |
4928 | |
4929 | return -ENOMEM; | |
4930 | } | |
4931 | ||
64749c9c | 4932 | static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) |
76ad4f0e S |
4933 | { |
4934 | struct hnae3_handle *h = priv->ae_handle; | |
4935 | int i; | |
4936 | ||
4937 | for (i = 0; i < h->kinfo.num_tqps; i++) { | |
5f06b903 YL |
4938 | hns3_fini_ring(&priv->ring[i]); |
4939 | hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); | |
76ad4f0e | 4940 | } |
76ad4f0e S |
4941 | } |
4942 | ||
4943 | /* Set the MAC address if it is configured, or leave it to the AE driver */ |
8e6de441 | 4944 | static int hns3_init_mac_addr(struct net_device *netdev) |
76ad4f0e S |
4945 | { |
4946 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
4f331fda | 4947 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
76ad4f0e S |
4948 | struct hnae3_handle *h = priv->ae_handle; |
4949 | u8 mac_addr_temp[ETH_ALEN]; | |
7fa6be4f | 4950 | int ret = 0; |
76ad4f0e | 4951 | |
8e6de441 | 4952 | if (h->ae_algo->ops->get_mac_addr) |
76ad4f0e | 4953 | h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); |
76ad4f0e S |
4954 | |
4955 | /* Check if the MAC address is valid, if not get a random one */ | |
8e6de441 | 4956 | if (!is_valid_ether_addr(mac_addr_temp)) { |
76ad4f0e | 4957 | eth_hw_addr_random(netdev); |
4f331fda YM |
4958 | hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); |
4959 | dev_warn(priv->dev, "using random MAC address %s\n", | |
4960 | format_mac_addr); | |
ee4bcd3b | 4961 | } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { |
f3956ebb | 4962 | eth_hw_addr_set(netdev, mac_addr_temp); |
8e6de441 | 4963 | ether_addr_copy(netdev->perm_addr, mac_addr_temp); |
ee4bcd3b JS |
4964 | } else { |
4965 | return 0; | |
76ad4f0e | 4966 | } |
139e8792 L |
4967 | |
4968 | if (h->ae_algo->ops->set_mac_addr) | |
7fa6be4f | 4969 | ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); |
139e8792 | 4970 | |
7fa6be4f | 4971 | return ret; |
76ad4f0e S |
4972 | } |
4973 | ||
c8a8045b HT |
4974 | static int hns3_init_phy(struct net_device *netdev) |
4975 | { | |
4976 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
4977 | int ret = 0; | |
4978 | ||
4979 | if (h->ae_algo->ops->mac_connect_phy) | |
4980 | ret = h->ae_algo->ops->mac_connect_phy(h); | |
4981 | ||
4982 | return ret; | |
4983 | } | |
4984 | ||
4985 | static void hns3_uninit_phy(struct net_device *netdev) | |
4986 | { | |
4987 | struct hnae3_handle *h = hns3_get_handle(netdev); | |
4988 | ||
4989 | if (h->ae_algo->ops->mac_disconnect_phy) | |
4990 | h->ae_algo->ops->mac_disconnect_phy(h); | |
4991 | } | |
4992 | ||
a6d818e3 YL |
4993 | static int hns3_client_start(struct hnae3_handle *handle) |
4994 | { | |
4995 | if (!handle->ae_algo->ops->client_start) | |
4996 | return 0; | |
4997 | ||
4998 | return handle->ae_algo->ops->client_start(handle); | |
4999 | } | |
5000 | ||
5001 | static void hns3_client_stop(struct hnae3_handle *handle) | |
5002 | { | |
5003 | if (!handle->ae_algo->ops->client_stop) | |
5004 | return; | |
5005 | ||
5006 | handle->ae_algo->ops->client_stop(handle); | |
5007 | } | |
5008 | ||
bb87be87 YL |
5009 | static void hns3_info_show(struct hns3_nic_priv *priv) |
5010 | { | |
5011 | struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; | |
4f331fda | 5012 | char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; |
bb87be87 | 5013 | |
4f331fda YM |
5014 | hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); |
5015 | dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); | |
adcf738b GL |
5016 | dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); |
5017 | dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); | |
5018 | dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); | |
5019 | dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); | |
5020 | dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); | |
5021 | dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); | |
35244430 JS |
5022 | dev_info(priv->dev, "Total number of enabled TCs: %u\n", |
5023 | kinfo->tc_info.num_tc); | |
adcf738b | 5024 | dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); |
bb87be87 YL |
5025 | } |
5026 | ||
9f0c6f4b YM |
5027 | static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, |
5028 | enum dim_cq_period_mode mode, bool is_tx) | |
5029 | { | |
5030 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); | |
5031 | struct hnae3_handle *handle = priv->ae_handle; | |
5032 | int i; | |
5033 | ||
5034 | if (is_tx) { | |
5035 | priv->tx_cqe_mode = mode; | |
5036 | ||
5037 | for (i = 0; i < priv->vector_num; i++) | |
5038 | priv->tqp_vector[i].tx_group.dim.mode = mode; | |
5039 | } else { | |
5040 | priv->rx_cqe_mode = mode; | |
5041 | ||
5042 | for (i = 0; i < priv->vector_num; i++) | |
5043 | priv->tqp_vector[i].rx_group.dim.mode = mode; | |
5044 | } | |
5045 | ||
5046 | 	/* only on device version V3 and above can GL switch the CQ/EQ |
5047 | 	 * period mode. |
5048 | */ | |
5049 | if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { | |
5050 | u32 new_mode; | |
5051 | u64 reg; | |
5052 | ||
5053 | new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? | |
5054 | HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; | |
5055 | reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; | |
5056 | ||
5057 | writel(new_mode, handle->kinfo.io_base + reg); | |
5058 | } | |
5059 | } | |
5060 | ||
cce1689e YM |
5061 | void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, |
5062 | enum dim_cq_period_mode tx_mode, | |
5063 | enum dim_cq_period_mode rx_mode) | |
9f0c6f4b YM |
5064 | { |
5065 | hns3_set_cq_period_mode(priv, tx_mode, true); | |
5066 | hns3_set_cq_period_mode(priv, rx_mode, false); | |
5067 | } | |
5068 | ||
c511dfff HT |
5069 | static void hns3_state_init(struct hnae3_handle *handle) |
5070 | { | |
5071 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); | |
5072 | struct net_device *netdev = handle->kinfo.netdev; | |
5073 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
5074 | ||
5075 | set_bit(HNS3_NIC_STATE_INITED, &priv->state); | |
5076 | ||
5077 | if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) | |
5078 | set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); | |
5079 | ||
5080 | if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) | |
5081 | set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); | |
5082 | ||
5083 | if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) | |
5084 | set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); | |
5085 | } | |
5086 | ||
76ad4f0e S |
5087 | static int hns3_client_init(struct hnae3_handle *handle) |
5088 | { | |
5089 | struct pci_dev *pdev = handle->pdev; | |
fd665b3d | 5090 | struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); |
0d43bf45 | 5091 | u16 alloc_tqps, max_rss_size; |
76ad4f0e S |
5092 | struct hns3_nic_priv *priv; |
5093 | struct net_device *netdev; | |
5094 | int ret; | |
5095 | ||
0d43bf45 HT |
5096 | handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, |
5097 | &max_rss_size); | |
5098 | netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps); | |
76ad4f0e S |
5099 | if (!netdev) |
5100 | return -ENOMEM; | |
5101 | ||
5102 | priv = netdev_priv(netdev); | |
5103 | priv->dev = &pdev->dev; | |
5104 | priv->netdev = netdev; | |
5105 | priv->ae_handle = handle; | |
f8fa222c | 5106 | priv->tx_timeout_count = 0; |
fd665b3d | 5107 | priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; |
b7b585c2 | 5108 | set_bit(HNS3_NIC_STATE_DOWN, &priv->state); |
76ad4f0e | 5109 | |
bb87be87 YL |
5110 | handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); |
5111 | ||
76ad4f0e S |
5112 | handle->kinfo.netdev = netdev; |
5113 | handle->priv = (void *)priv; | |
5114 | ||
8e6de441 | 5115 | hns3_init_mac_addr(netdev); |
76ad4f0e S |
5116 | |
5117 | hns3_set_default_feature(netdev); | |
5118 | ||
5119 | netdev->watchdog_timeo = HNS3_TX_TIMEOUT; | |
5120 | netdev->priv_flags |= IFF_UNICAST_FLT; | |
5121 | netdev->netdev_ops = &hns3_nic_netdev_ops; | |
5122 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
5123 | hns3_ethtool_set_ops(netdev); | |
76ad4f0e S |
5124 | |
5125 | /* Carrier off reporting is important to ethtool even BEFORE open */ | |
5126 | netif_carrier_off(netdev); | |
5127 | ||
5128 | ret = hns3_get_ring_config(priv); | |
5129 | if (ret) { | |
5130 | ret = -ENOMEM; | |
5131 | goto out_get_ring_cfg; | |
5132 | } | |
5133 | ||
73a13d8d HT |
5134 | hns3_nic_init_coal_cfg(priv); |
5135 | ||
dd38c726 YL |
5136 | ret = hns3_nic_alloc_vector_data(priv); |
5137 | if (ret) { | |
5138 | ret = -ENOMEM; | |
5139 | goto out_alloc_vector_data; | |
5140 | } | |
5141 | ||
76ad4f0e S |
5142 | ret = hns3_nic_init_vector_data(priv); |
5143 | if (ret) { | |
5144 | ret = -ENOMEM; | |
5145 | goto out_init_vector_data; | |
5146 | } | |
5147 | ||
5148 | ret = hns3_init_all_ring(priv); | |
5149 | if (ret) { | |
5150 | ret = -ENOMEM; | |
5f06b903 | 5151 | goto out_init_ring; |
76ad4f0e S |
5152 | } |
5153 | ||
9f0c6f4b YM |
5154 | hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, |
5155 | DIM_CQ_PERIOD_MODE_START_FROM_EQE); | |
5156 | ||
c8a8045b HT |
5157 | ret = hns3_init_phy(netdev); |
5158 | if (ret) | |
5159 | goto out_init_phy; | |
5160 | ||
08a10068 YL |
5161 | /* the device can work without cpu rmap, only aRFS needs it */ |
5162 | ret = hns3_set_rx_cpu_rmap(netdev); | |
5163 | if (ret) | |
5164 | dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); | |
5165 | ||
5166 | ret = hns3_nic_init_irq(priv); | |
5167 | if (ret) { | |
5168 | dev_err(priv->dev, "init irq failed! ret=%d\n", ret); | |
5169 | hns3_free_rx_cpu_rmap(netdev); | |
5170 | goto out_init_irq_fail; | |
5171 | } | |
5172 | ||
a6d818e3 YL |
5173 | ret = hns3_client_start(handle); |
5174 | if (ret) { | |
5175 | dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); | |
bf6de231 | 5176 | goto out_client_start; |
a6d818e3 YL |
5177 | } |
5178 | ||
986743db YL |
5179 | hns3_dcbnl_setup(handle); |
5180 | ||
5e69ea7e YM |
5181 | ret = hns3_dbg_init(handle); |
5182 | if (ret) { | |
5183 | dev_err(priv->dev, "failed to init debugfs, ret = %d\n", | |
5184 | ret); | |
5185 | goto out_client_start; | |
5186 | } | |
b2292360 | 5187 | |
e070c8b9 | 5188 | netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); |
a8e8b7ff | 5189 | |
c511dfff | 5190 | hns3_state_init(handle); |
5e7414cd | 5191 | |
a289a7e5 JS |
5192 | ret = register_netdev(netdev); |
5193 | if (ret) { | |
5194 | dev_err(priv->dev, "probe register netdev fail!\n"); | |
5195 | goto out_reg_netdev_fail; | |
5196 | } | |
5197 | ||
bb87be87 YL |
5198 | if (netif_msg_drv(handle)) |
5199 | hns3_info_show(priv); | |
5200 | ||
76ad4f0e S |
5201 | return ret; |
5202 | ||
a289a7e5 JS |
5203 | out_reg_netdev_fail: |
5204 | hns3_dbg_uninit(handle); | |
18655128 | 5205 | out_client_start: |
08a10068 YL |
5206 | hns3_free_rx_cpu_rmap(netdev); |
5207 | hns3_nic_uninit_irq(priv); | |
5208 | out_init_irq_fail: | |
c8a8045b HT |
5209 | hns3_uninit_phy(netdev); |
5210 | out_init_phy: | |
5211 | hns3_uninit_all_ring(priv); | |
5f06b903 | 5212 | out_init_ring: |
e2152785 | 5213 | hns3_nic_uninit_vector_data(priv); |
76ad4f0e | 5214 | out_init_vector_data: |
dd38c726 YL |
5215 | hns3_nic_dealloc_vector_data(priv); |
5216 | out_alloc_vector_data: | |
5f06b903 | 5217 | priv->ring = NULL; |
76ad4f0e S |
5218 | out_get_ring_cfg: |
5219 | priv->ae_handle = NULL; | |
5220 | free_netdev(netdev); | |
5221 | return ret; | |
5222 | } | |
5223 | ||
5224 | static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) | |
5225 | { | |
5226 | struct net_device *netdev = handle->kinfo.netdev; | |
5227 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
76ad4f0e S |
5228 | |
5229 | if (netdev->reg_state != NETREG_UNINITIALIZED) | |
5230 | unregister_netdev(netdev); | |
5231 | ||
eb32c896 HT |
5232 | hns3_client_stop(handle); |
5233 | ||
0d2f68c7 HT |
5234 | hns3_uninit_phy(netdev); |
5235 | ||
814da63c HT |
5236 | if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
5237 | netdev_warn(netdev, "already uninitialized\n"); | |
5238 | goto out_netdev_free; | |
5239 | } | |
5240 | ||
08a10068 YL |
5241 | hns3_free_rx_cpu_rmap(netdev); |
5242 | ||
5243 | hns3_nic_uninit_irq(priv); | |
5244 | ||
f96315f2 | 5245 | hns3_clear_all_ring(handle, true); |
7b763f3f | 5246 | |
e2152785 | 5247 | hns3_nic_uninit_vector_data(priv); |
76ad4f0e | 5248 | |
08a10068 | 5249 | hns3_nic_dealloc_vector_data(priv); |
dd38c726 | 5250 | |
64749c9c | 5251 | hns3_uninit_all_ring(priv); |
76ad4f0e | 5252 | |
ec777890 YL |
5253 | hns3_put_ring_config(priv); |
5254 | ||
814da63c | 5255 | out_netdev_free: |
e22b5e72 | 5256 | hns3_dbg_uninit(handle); |
76ad4f0e S |
5257 | free_netdev(netdev); |
5258 | } | |
5259 | ||
5260 | static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) | |
5261 | { | |
5262 | struct net_device *netdev = handle->kinfo.netdev; | |
5263 | ||
5264 | if (!netdev) | |
5265 | return; | |
5266 | ||
5267 | if (linkup) { | |
76ad4f0e | 5268 | netif_tx_wake_all_queues(netdev); |
a7e90ee5 | 5269 | netif_carrier_on(netdev); |
bb87be87 YL |
5270 | if (netif_msg_link(handle)) |
5271 | netdev_info(netdev, "link up\n"); | |
76ad4f0e S |
5272 | } else { |
5273 | netif_carrier_off(netdev); | |
5274 | netif_tx_stop_all_queues(netdev); | |
bb87be87 YL |
5275 | if (netif_msg_link(handle)) |
5276 | netdev_info(netdev, "link down\n"); | |
76ad4f0e S |
5277 | } |
5278 | } | |
5279 | ||
beebca3a | 5280 | static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) |
bb6b94a8 | 5281 | { |
beebca3a | 5282 | while (ring->next_to_clean != ring->next_to_use) { |
7b763f3f | 5283 | ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; |
619ae331 | 5284 | hns3_free_buffer_detach(ring, ring->next_to_clean, 0); |
beebca3a YL |
5285 | ring_ptr_move_fw(ring, next_to_clean); |
5286 | } | |
f6061a05 YL |
5287 | |
5288 | ring->pending_buf = 0; | |
beebca3a YL |
5289 | } |
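
ring_ptr_move_fw() advances next_to_clean (or next_to_use) one slot around the fixed-size descriptor ring. Assuming the usual modular step such a macro expands to, a standalone sketch of the wrap-around (desc_num is a hypothetical ring size):

#include <stdio.h>

static int ring_move_fw(int idx, int desc_num)
{
	return (idx + 1) % desc_num;
}

int main(void)
{
	int idx = 510, desc_num = 512, i;

	for (i = 0; i < 4; i++) {	/* prints 511 0 1 2 */
		idx = ring_move_fw(idx, desc_num);
		printf("%d\n", idx);
	}
	return 0;
}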
5290 | ||
7b763f3f FL |
5291 | static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) |
5292 | { | |
5293 | struct hns3_desc_cb res_cbs; | |
5294 | int ret; | |
5295 | ||
5296 | while (ring->next_to_use != ring->next_to_clean) { | |
5297 | 		/* When a buffer is not reused, its memory has been |
5298 | 		 * freed in hns3_handle_rx_bd or will be freed by the |
5299 | 		 * stack, so we need to replace the buffer here. |
5300 | */ | |
5301 | if (!ring->desc_cb[ring->next_to_use].reuse_flag) { | |
4d2cad32 | 5302 | ret = hns3_alloc_and_map_buffer(ring, &res_cbs); |
7b763f3f FL |
5303 | if (ret) { |
5304 | u64_stats_update_begin(&ring->syncp); | |
5305 | ring->stats.sw_err_cnt++; | |
5306 | u64_stats_update_end(&ring->syncp); | |
5307 | 			/* if allocating a new buffer fails, exit directly |
5308 | 			 * and re-clear in the up flow. |
5309 | */ | |
c8711956 | 5310 | netdev_warn(ring_to_netdev(ring), |
7b763f3f FL |
5311 | "reserve buffer map failed, ret = %d\n", |
5312 | ret); | |
5313 | return ret; | |
5314 | } | |
9b2f3477 | 5315 | hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); |
7b763f3f FL |
5316 | } |
5317 | ring_ptr_move_fw(ring, next_to_use); | |
5318 | } | |
5319 | ||
cc5ff6e9 PL |
5320 | /* Free the pending skb in rx ring */ |
5321 | if (ring->skb) { | |
5322 | dev_kfree_skb_any(ring->skb); | |
5323 | ring->skb = NULL; | |
5324 | ring->pending_buf = 0; | |
5325 | } | |
5326 | ||
7b763f3f FL |
5327 | return 0; |
5328 | } | |
5329 | ||
5330 | static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) | |
beebca3a | 5331 | { |
beebca3a YL |
5332 | while (ring->next_to_use != ring->next_to_clean) { |
5333 | 		/* When a buffer is not reused, its memory has been |
5334 | 		 * freed in hns3_handle_rx_bd or will be freed by the |
5335 | 		 * stack, so we only need to unmap the buffer here. |
5336 | */ | |
5337 | if (!ring->desc_cb[ring->next_to_use].reuse_flag) { | |
5338 | hns3_unmap_buffer(ring, | |
5339 | &ring->desc_cb[ring->next_to_use]); | |
5340 | ring->desc_cb[ring->next_to_use].dma = 0; | |
5341 | } | |
5342 | ||
5343 | ring_ptr_move_fw(ring, next_to_use); | |
5344 | } | |
bb6b94a8 L |
5345 | } |
5346 | ||
f96315f2 | 5347 | static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) |
bb6b94a8 L |
5348 | { |
5349 | struct net_device *ndev = h->kinfo.netdev; | |
5350 | struct hns3_nic_priv *priv = netdev_priv(ndev); | |
5351 | u32 i; | |
5352 | ||
5353 | for (i = 0; i < h->kinfo.num_tqps; i++) { | |
bb6b94a8 L |
5354 | struct hns3_enet_ring *ring; |
5355 | ||
5f06b903 | 5356 | ring = &priv->ring[i]; |
beebca3a | 5357 | hns3_clear_tx_ring(ring); |
bb6b94a8 | 5358 | |
5f06b903 | 5359 | ring = &priv->ring[i + h->kinfo.num_tqps]; |
7b763f3f FL |
5360 | /* Continue to clear other rings even if clearing some |
5361 | * rings failed. | |
5362 | */ | |
f96315f2 HT |
5363 | if (force) |
5364 | hns3_force_clear_rx_ring(ring); | |
5365 | else | |
5366 | hns3_clear_rx_ring(ring); | |
bb6b94a8 L |
5367 | } |
5368 | } | |
5369 | ||
7b763f3f FL |
5370 | int hns3_nic_reset_all_ring(struct hnae3_handle *h) |
5371 | { | |
5372 | struct net_device *ndev = h->kinfo.netdev; | |
5373 | struct hns3_nic_priv *priv = netdev_priv(ndev); | |
5374 | struct hns3_enet_ring *rx_ring; | |
5375 | int i, j; | |
5376 | int ret; | |
5377 | ||
8fa86551 YM |
5378 | ret = h->ae_algo->ops->reset_queue(h); |
5379 | if (ret) | |
5380 | return ret; | |
7fa6be4f | 5381 | |
8fa86551 | 5382 | for (i = 0; i < h->kinfo.num_tqps; i++) { |
5f06b903 | 5383 | hns3_init_ring_hw(&priv->ring[i]); |
7b763f3f FL |
5384 | |
5385 | 		/* We need to clear the tx ring here because the self test will |
5386 | 		 * use the ring and does not run the down flow before up |
5387 | */ | |
5f06b903 YL |
5388 | hns3_clear_tx_ring(&priv->ring[i]); |
5389 | priv->ring[i].next_to_clean = 0; | |
5390 | priv->ring[i].next_to_use = 0; | |
20d06ca2 | 5391 | priv->ring[i].last_to_use = 0; |
7b763f3f | 5392 | |
5f06b903 | 5393 | rx_ring = &priv->ring[i + h->kinfo.num_tqps]; |
7b763f3f FL |
5394 | hns3_init_ring_hw(rx_ring); |
5395 | ret = hns3_clear_rx_ring(rx_ring); | |
5396 | if (ret) | |
5397 | return ret; | |
5398 | ||
5399 | 		/* We cannot know the hardware head and tail when this |
5400 | * function is called in reset flow, so we reuse all desc. | |
5401 | */ | |
5402 | for (j = 0; j < rx_ring->desc_num; j++) | |
5403 | hns3_reuse_buffer(rx_ring, j); | |
5404 | ||
5405 | rx_ring->next_to_clean = 0; | |
5406 | rx_ring->next_to_use = 0; | |
5407 | } | |
5408 | ||
1c772154 YL |
5409 | hns3_init_tx_ring_tc(priv); |
5410 | ||
7b763f3f FL |
5411 | return 0; |
5412 | } | |
5413 | ||
bb6b94a8 L |
5414 | static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) |
5415 | { | |
5416 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
5417 | struct net_device *ndev = kinfo->netdev; | |
257e4f29 HT |
5418 | struct hns3_nic_priv *priv = netdev_priv(ndev); |
5419 | ||
5420 | if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) | |
5421 | return 0; | |
bb6b94a8 L |
5422 | |
5423 | if (!netif_running(ndev)) | |
6b1385cc | 5424 | return 0; |
bb6b94a8 L |
5425 | |
5426 | return hns3_nic_net_stop(ndev); | |
5427 | } | |
5428 | ||
5429 | static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) | |
5430 | { | |
5431 | struct hnae3_knic_private_info *kinfo = &handle->kinfo; | |
257e4f29 | 5432 | struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); |
bb6b94a8 L |
5433 | int ret = 0; |
5434 | ||
b4047aac JS |
5435 | if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
5436 | netdev_err(kinfo->netdev, "device is not initialized yet\n"); | |
5437 | return -EFAULT; | |
5438 | } | |
5439 | ||
e8884027 HT |
5440 | clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); |
5441 | ||
bb6b94a8 | 5442 | if (netif_running(kinfo->netdev)) { |
e8884027 | 5443 | ret = hns3_nic_net_open(kinfo->netdev); |
bb6b94a8 | 5444 | if (ret) { |
e8884027 | 5445 | set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); |
bb6b94a8 | 5446 | netdev_err(kinfo->netdev, |
9b2f3477 | 5447 | "net up fail, ret=%d!\n", ret); |
bb6b94a8 L |
5448 | return ret; |
5449 | } | |
bb6b94a8 L |
5450 | } |
5451 | ||
5452 | return ret; | |
5453 | } | |
5454 | ||
5455 | static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) | |
5456 | { | |
5457 | struct net_device *netdev = handle->kinfo.netdev; | |
5458 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
5459 | int ret; | |
5460 | ||
bb6b94a8 L |
5461 | /* Carrier off reporting is important to ethtool even BEFORE open */ |
5462 | netif_carrier_off(netdev); | |
5463 | ||
2c9dd668 | 5464 | ret = hns3_get_ring_config(priv); |
862d969a HT |
5465 | if (ret) |
5466 | return ret; | |
5467 | ||
2c9dd668 HT |
5468 | ret = hns3_nic_alloc_vector_data(priv); |
5469 | if (ret) | |
5470 | goto err_put_ring; | |
5471 | ||
bb6b94a8 L |
5472 | ret = hns3_nic_init_vector_data(priv); |
5473 | if (ret) | |
862d969a | 5474 | goto err_dealloc_vector; |
bb6b94a8 L |
5475 | |
5476 | ret = hns3_init_all_ring(priv); | |
862d969a HT |
5477 | if (ret) |
5478 | goto err_uninit_vector; | |
bb6b94a8 | 5479 | |
9f0c6f4b YM |
5480 | hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); |
5481 | ||
08a10068 YL |
5482 | /* the device can work without cpu rmap, only aRFS needs it */ |
5483 | ret = hns3_set_rx_cpu_rmap(netdev); | |
5484 | if (ret) | |
5485 | dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); | |
5486 | ||
5487 | ret = hns3_nic_init_irq(priv); | |
5488 | if (ret) { | |
5489 | dev_err(priv->dev, "init irq failed! ret=%d\n", ret); | |
5490 | hns3_free_rx_cpu_rmap(netdev); | |
5491 | goto err_init_irq_fail; | |
5492 | } | |
5493 | ||
ee4bcd3b JS |
5494 | if (!hns3_is_phys_func(handle->pdev)) |
5495 | hns3_init_mac_addr(netdev); | |
5496 | ||
cd513a69 HT |
5497 | ret = hns3_client_start(handle); |
5498 | if (ret) { | |
5499 | dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); | |
08a10068 | 5500 | goto err_client_start_fail; |
cd513a69 HT |
5501 | } |
5502 | ||
814da63c HT |
5503 | set_bit(HNS3_NIC_STATE_INITED, &priv->state); |
5504 | ||
862d969a HT |
5505 | return ret; |
5506 | ||
08a10068 YL |
5507 | err_client_start_fail: |
5508 | hns3_free_rx_cpu_rmap(netdev); | |
5509 | hns3_nic_uninit_irq(priv); | |
5510 | err_init_irq_fail: | |
cd513a69 | 5511 | hns3_uninit_all_ring(priv); |
862d969a HT |
5512 | err_uninit_vector: |
5513 | hns3_nic_uninit_vector_data(priv); | |
862d969a HT |
5514 | err_dealloc_vector: |
5515 | hns3_nic_dealloc_vector_data(priv); | |
2c9dd668 HT |
5516 | err_put_ring: |
5517 | hns3_put_ring_config(priv); | |
862d969a | 5518 | |
bb6b94a8 L |
5519 | return ret; |
5520 | } | |
5521 | ||
5522 | static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) | |
5523 | { | |
5524 | struct net_device *netdev = handle->kinfo.netdev; | |
5525 | struct hns3_nic_priv *priv = netdev_priv(netdev); | |
bb6b94a8 | 5526 | |
1eeb3367 | 5527 | if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { |
814da63c HT |
5528 | netdev_warn(netdev, "already uninitialized\n"); |
5529 | return 0; | |
5530 | } | |
5531 | ||
08a10068 YL |
5532 | hns3_free_rx_cpu_rmap(netdev); |
5533 | hns3_nic_uninit_irq(priv); | |
f96315f2 HT |
5534 | hns3_clear_all_ring(handle, true); |
5535 | hns3_reset_tx_queue(priv->ae_handle); | |
bb6b94a8 | 5536 | |
e2152785 | 5537 | hns3_nic_uninit_vector_data(priv); |
bb6b94a8 | 5538 | |
08a10068 | 5539 | hns3_nic_dealloc_vector_data(priv); |
862d969a | 5540 | |
64749c9c | 5541 | hns3_uninit_all_ring(priv); |
bb6b94a8 | 5542 | |
2c9dd668 | 5543 | hns3_put_ring_config(priv); |
2c9dd668 | 5544 | |
205238f4 | 5545 | return 0; |
bb6b94a8 L |
5546 | } |
5547 | ||
e445f08a HC |
5548 | int hns3_reset_notify(struct hnae3_handle *handle, |
5549 | enum hnae3_reset_notify_type type) | |
bb6b94a8 L |
5550 | { |
5551 | int ret = 0; | |
5552 | ||
5553 | switch (type) { | |
5554 | case HNAE3_UP_CLIENT: | |
e1586241 SM |
5555 | ret = hns3_reset_notify_up_enet(handle); |
5556 | break; | |
bb6b94a8 L |
5557 | case HNAE3_DOWN_CLIENT: |
5558 | ret = hns3_reset_notify_down_enet(handle); | |
5559 | break; | |
5560 | case HNAE3_INIT_CLIENT: | |
5561 | ret = hns3_reset_notify_init_enet(handle); | |
5562 | break; | |
5563 | case HNAE3_UNINIT_CLIENT: | |
5564 | ret = hns3_reset_notify_uninit_enet(handle); | |
5565 | break; | |
5566 | default: | |
5567 | break; | |
5568 | } | |
5569 | ||
5570 | return ret; | |
5571 | } | |
5572 | ||
3a5a5f06 PL |
5573 | static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num, |
5574 | bool rxfh_configured) | |
5575 | { | |
5576 | int ret; | |
5577 | ||
5578 | ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, | |
5579 | rxfh_configured); | |
5580 | if (ret) { | |
5581 | dev_err(&handle->pdev->dev, | |
5582 | "Change tqp num(%u) fail.\n", new_tqp_num); | |
5583 | return ret; | |
5584 | } | |
5585 | ||
5586 | ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT); | |
5587 | if (ret) | |
5588 | return ret; | |
5589 | ||
5590 | ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT); | |
5591 | if (ret) | |
5592 | hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT); | |
5593 | ||
5594 | return ret; | |
5595 | } | |
5596 | ||
09f2af64 PL |
5597 | int hns3_set_channels(struct net_device *netdev, |
5598 | struct ethtool_channels *ch) | |
5599 | { | |
09f2af64 PL |
5600 | struct hnae3_handle *h = hns3_get_handle(netdev); |
5601 | struct hnae3_knic_private_info *kinfo = &h->kinfo; | |
90c68a41 | 5602 | bool rxfh_configured = netif_is_rxfh_configured(netdev); |
09f2af64 PL |
5603 | u32 new_tqp_num = ch->combined_count; |
5604 | u16 org_tqp_num; | |
5605 | int ret; | |
5606 | ||
44950d28 JS |
5607 | if (hns3_nic_resetting(netdev)) |
5608 | return -EBUSY; | |
5609 | ||
09f2af64 PL |
5610 | if (ch->rx_count || ch->tx_count) |
5611 | return -EINVAL; | |
5612 | ||
5a5c9091 JS |
5613 | if (kinfo->tc_info.mqprio_active) { |
5614 | dev_err(&netdev->dev, | |
5615 | "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); | |
5616 | return -EINVAL; | |
5617 | } | |
5618 | ||
678335a1 | 5619 | if (new_tqp_num > hns3_get_max_available_channels(h) || |
c78b5b6c | 5620 | new_tqp_num < 1) { |
09f2af64 | 5621 | dev_err(&netdev->dev, |
adcf738b | 5622 | "Change tqps fail, the tqp range is from 1 to %u", |
678335a1 | 5623 | hns3_get_max_available_channels(h)); |
09f2af64 PL |
5624 | return -EINVAL; |
5625 | } | |
5626 | ||
c78b5b6c | 5627 | if (kinfo->rss_size == new_tqp_num) |
09f2af64 PL |
5628 | return 0; |
5629 | ||
1c822948 YL |
5630 | netif_dbg(h, drv, netdev, |
5631 | "set channels: tqp_num=%u, rxfh=%d\n", | |
5632 | new_tqp_num, rxfh_configured); | |
5633 | ||
65749f73 HT |
5634 | ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); |
5635 | if (ret) | |
5636 | return ret; | |
dd38c726 | 5637 | |
65749f73 HT |
5638 | ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); |
5639 | if (ret) | |
5640 | return ret; | |
09f2af64 PL |
5641 | |
5642 | org_tqp_num = h->kinfo.num_tqps; | |
3a5a5f06 | 5643 | ret = hns3_change_channels(h, new_tqp_num, rxfh_configured); |
09f2af64 | 5644 | if (ret) { |
3a5a5f06 PL |
5645 | int ret1; |
5646 | ||
5647 | netdev_warn(netdev, | |
5648 | "Change channels fail, revert to old value\n"); | |
5649 | ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured); | |
5650 | if (ret1) { | |
5651 | netdev_err(netdev, | |
5652 | "revert to old channel fail\n"); | |
5653 | return ret1; | |
09f2af64 | 5654 | } |
3a5a5f06 | 5655 | |
65749f73 | 5656 | return ret; |
3a5a5f06 | 5657 | } |
09f2af64 | 5658 | |
3a5a5f06 | 5659 | return 0; |
09f2af64 PL |
5660 | } |
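
hns3_set_channels() applies the new queue count and, when that fails, tries to restore the old one, reporting the revert error only if even the rollback fails. A compact model of that change-then-revert flow, with apply_tqp_num() as a hypothetical stand-in for hns3_change_channels():

#include <stdio.h>

static int apply_tqp_num(unsigned int n)
{
	return n <= 8 ? 0 : -1;	/* pretend only up to 8 queues succeed */
}

static int set_channels(unsigned int org_tqp_num, unsigned int new_tqp_num)
{
	int ret = apply_tqp_num(new_tqp_num);

	if (ret) {
		int ret1 = apply_tqp_num(org_tqp_num);

		if (ret1)
			return ret1;	/* revert failed: fatal */
		return ret;		/* reverted: report original error */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", set_channels(4, 16));	/* 16 fails, reverts to 4 */
	return 0;
}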
5661 | ||
a83d2961 WL |
5662 | static const struct hns3_hw_error_info hns3_hw_err[] = { |
5663 | { .type = HNAE3_PPU_POISON_ERROR, | |
5664 | .msg = "PPU poison" }, | |
5665 | { .type = HNAE3_CMDQ_ECC_ERROR, | |
5666 | .msg = "IMP CMDQ error" }, | |
5667 | { .type = HNAE3_IMP_RD_POISON_ERROR, | |
5668 | .msg = "IMP RD poison" }, | |
6cd131dd YM |
5669 | { .type = HNAE3_ROCEE_AXI_RESP_ERROR, |
5670 | .msg = "ROCEE AXI RESP error" }, | |
a83d2961 WL |
5671 | }; |
5672 | ||
5673 | static void hns3_process_hw_error(struct hnae3_handle *handle, | |
5674 | enum hnae3_hw_error_type type) | |
5675 | { | |
5676 | int i; | |
5677 | ||
5678 | for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) { | |
5679 | if (hns3_hw_err[i].type == type) { | |
5680 | dev_err(&handle->pdev->dev, "Detected %s!\n", | |
5681 | hns3_hw_err[i].msg); | |
5682 | break; | |
5683 | } | |
5684 | } | |
5685 | } | |
5686 | ||
1db9b1bf | 5687 | static const struct hnae3_client_ops client_ops = { |
76ad4f0e S |
5688 | .init_instance = hns3_client_init, |
5689 | .uninit_instance = hns3_client_uninit, | |
5690 | .link_status_change = hns3_link_status_change, | |
bb6b94a8 | 5691 | .reset_notify = hns3_reset_notify, |
a83d2961 | 5692 | .process_hw_error = hns3_process_hw_error, |
76ad4f0e S |
5693 | }; |
5694 | ||
5695 | /* hns3_init_module - Driver registration routine | |
5696 | * hns3_init_module is the first routine called when the driver is | |
5697 | * loaded. All it does is register with the PCI subsystem. | |
5698 | */ | |
5699 | static int __init hns3_init_module(void) | |
5700 | { | |
5701 | int ret; | |
5702 | ||
5703 | pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); | |
5704 | pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); | |
5705 | ||
5706 | client.type = HNAE3_CLIENT_KNIC; | |
cdc37385 | 5707 | snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s", |
76ad4f0e S |
5708 | hns3_driver_name); |
5709 | ||
5710 | client.ops = &client_ops; | |
5711 | ||
13562d1f XW |
5712 | INIT_LIST_HEAD(&client.node); |
5713 | ||
b2292360 | 5714 | hns3_dbg_register_debugfs(hns3_driver_name); |
5715 | ||
76ad4f0e S |
5716 | ret = hnae3_register_client(&client); |
5717 | if (ret) | |
b2292360 | 5718 | goto err_reg_client; |
76ad4f0e S |
5719 | |
5720 | ret = pci_register_driver(&hns3_driver); | |
5721 | if (ret) | |
b2292360 | 5722 | goto err_reg_driver; |
76ad4f0e S |
5723 | |
5724 | return ret; | |
b2292360 | 5725 | |
5726 | err_reg_driver: | |
5727 | hnae3_unregister_client(&client); | |
5728 | err_reg_client: | |
5729 | hns3_dbg_unregister_debugfs(); | |
5730 | return ret; | |
76ad4f0e S |
5731 | } |
5732 | module_init(hns3_init_module); | |
5733 | ||
5734 | /* hns3_exit_module - Driver exit cleanup routine | |
5735 | * hns3_exit_module is called just before the driver is removed | |
5736 | * from memory. | |
5737 | */ | |
5738 | static void __exit hns3_exit_module(void) | |
5739 | { | |
5740 | pci_unregister_driver(&hns3_driver); | |
5741 | hnae3_unregister_client(&client); | |
b2292360 | 5742 | hns3_dbg_unregister_debugfs(); |
76ad4f0e S |
5743 | } |
5744 | module_exit(hns3_exit_module); | |
5745 | ||
5746 | MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); | |
5747 | MODULE_AUTHOR("Huawei Tech. Co., Ltd."); | |
5748 | MODULE_LICENSE("GPL"); | |
5749 | MODULE_ALIAS("pci:hns-nic"); |