// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>

#include "hnae3.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"
#define hns3_set_field(origin, shift, val)	((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S)	DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

#define hns3_rl_err(fmt, ...)						\
	do {								\
		if (net_ratelimit())					\
			netdev_err(fmt, ##__VA_ARGS__);			\
	} while (0)
45 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
47 static const char hns3_driver_name[] = "hns3";
48 static const char hns3_driver_string[] =
49 "Hisilicon Ethernet Network Driver for Hip08 Family";
50 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
51 static struct hnae3_client client;
53 static int debug = -1;
54 module_param(debug, int, 0);
55 MODULE_PARM_DESC(debug, " Network interface message level setting");
57 static unsigned int tx_sgl = 1;
58 module_param(tx_sgl, uint, 0600);
59 MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
61 static bool page_pool_enabled = true;
62 module_param(page_pool_enabled, bool, 0400);
64 #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
65 sizeof(struct sg_table))
66 #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
67 dma_get_cache_alignment())
69 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
70 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
72 #define HNS3_INNER_VLAN_TAG 1
73 #define HNS3_OUTER_VLAN_TAG 2
75 #define HNS3_MIN_TX_LEN 33U
76 #define HNS3_MIN_TUN_PKT_LEN 65U
/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
85 static const struct pci_device_id hns3_pci_tbl[] = {
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
89 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
90 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
91 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
92 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
93 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
94 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
95 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
96 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
97 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
98 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
99 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
100 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
101 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
102 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, },
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
	{ ptype, l, CHECKSUM_##s, HNS3_L3_TYPE_##t, 1 }

#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
	{ ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
118 static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
119 HNS3_RX_PTYPE_UNUSED_ENTRY(0),
120 HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
121 HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
122 HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
123 HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
124 HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
125 HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
126 HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
127 HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
128 HNS3_RX_PTYPE_UNUSED_ENTRY(9),
129 HNS3_RX_PTYPE_UNUSED_ENTRY(10),
130 HNS3_RX_PTYPE_UNUSED_ENTRY(11),
131 HNS3_RX_PTYPE_UNUSED_ENTRY(12),
132 HNS3_RX_PTYPE_UNUSED_ENTRY(13),
133 HNS3_RX_PTYPE_UNUSED_ENTRY(14),
134 HNS3_RX_PTYPE_UNUSED_ENTRY(15),
135 HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
136 HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
137 HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
138 HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
139 HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
140 HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
141 HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
142 HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
143 HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
144 HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
145 HNS3_RX_PTYPE_UNUSED_ENTRY(26),
146 HNS3_RX_PTYPE_UNUSED_ENTRY(27),
147 HNS3_RX_PTYPE_UNUSED_ENTRY(28),
148 HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
149 HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
150 HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
151 HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
152 HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
153 HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
154 HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
155 HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
156 HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
157 HNS3_RX_PTYPE_UNUSED_ENTRY(38),
158 HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
159 HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
160 HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
161 HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
162 HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
163 HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
164 HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
165 HNS3_RX_PTYPE_UNUSED_ENTRY(46),
166 HNS3_RX_PTYPE_UNUSED_ENTRY(47),
167 HNS3_RX_PTYPE_UNUSED_ENTRY(48),
168 HNS3_RX_PTYPE_UNUSED_ENTRY(49),
169 HNS3_RX_PTYPE_UNUSED_ENTRY(50),
170 HNS3_RX_PTYPE_UNUSED_ENTRY(51),
171 HNS3_RX_PTYPE_UNUSED_ENTRY(52),
172 HNS3_RX_PTYPE_UNUSED_ENTRY(53),
173 HNS3_RX_PTYPE_UNUSED_ENTRY(54),
174 HNS3_RX_PTYPE_UNUSED_ENTRY(55),
175 HNS3_RX_PTYPE_UNUSED_ENTRY(56),
176 HNS3_RX_PTYPE_UNUSED_ENTRY(57),
177 HNS3_RX_PTYPE_UNUSED_ENTRY(58),
178 HNS3_RX_PTYPE_UNUSED_ENTRY(59),
179 HNS3_RX_PTYPE_UNUSED_ENTRY(60),
180 HNS3_RX_PTYPE_UNUSED_ENTRY(61),
181 HNS3_RX_PTYPE_UNUSED_ENTRY(62),
182 HNS3_RX_PTYPE_UNUSED_ENTRY(63),
183 HNS3_RX_PTYPE_UNUSED_ENTRY(64),
184 HNS3_RX_PTYPE_UNUSED_ENTRY(65),
185 HNS3_RX_PTYPE_UNUSED_ENTRY(66),
186 HNS3_RX_PTYPE_UNUSED_ENTRY(67),
187 HNS3_RX_PTYPE_UNUSED_ENTRY(68),
188 HNS3_RX_PTYPE_UNUSED_ENTRY(69),
189 HNS3_RX_PTYPE_UNUSED_ENTRY(70),
190 HNS3_RX_PTYPE_UNUSED_ENTRY(71),
191 HNS3_RX_PTYPE_UNUSED_ENTRY(72),
192 HNS3_RX_PTYPE_UNUSED_ENTRY(73),
193 HNS3_RX_PTYPE_UNUSED_ENTRY(74),
194 HNS3_RX_PTYPE_UNUSED_ENTRY(75),
195 HNS3_RX_PTYPE_UNUSED_ENTRY(76),
196 HNS3_RX_PTYPE_UNUSED_ENTRY(77),
197 HNS3_RX_PTYPE_UNUSED_ENTRY(78),
198 HNS3_RX_PTYPE_UNUSED_ENTRY(79),
199 HNS3_RX_PTYPE_UNUSED_ENTRY(80),
200 HNS3_RX_PTYPE_UNUSED_ENTRY(81),
201 HNS3_RX_PTYPE_UNUSED_ENTRY(82),
202 HNS3_RX_PTYPE_UNUSED_ENTRY(83),
203 HNS3_RX_PTYPE_UNUSED_ENTRY(84),
204 HNS3_RX_PTYPE_UNUSED_ENTRY(85),
205 HNS3_RX_PTYPE_UNUSED_ENTRY(86),
206 HNS3_RX_PTYPE_UNUSED_ENTRY(87),
207 HNS3_RX_PTYPE_UNUSED_ENTRY(88),
208 HNS3_RX_PTYPE_UNUSED_ENTRY(89),
209 HNS3_RX_PTYPE_UNUSED_ENTRY(90),
210 HNS3_RX_PTYPE_UNUSED_ENTRY(91),
211 HNS3_RX_PTYPE_UNUSED_ENTRY(92),
212 HNS3_RX_PTYPE_UNUSED_ENTRY(93),
213 HNS3_RX_PTYPE_UNUSED_ENTRY(94),
214 HNS3_RX_PTYPE_UNUSED_ENTRY(95),
215 HNS3_RX_PTYPE_UNUSED_ENTRY(96),
216 HNS3_RX_PTYPE_UNUSED_ENTRY(97),
217 HNS3_RX_PTYPE_UNUSED_ENTRY(98),
218 HNS3_RX_PTYPE_UNUSED_ENTRY(99),
219 HNS3_RX_PTYPE_UNUSED_ENTRY(100),
220 HNS3_RX_PTYPE_UNUSED_ENTRY(101),
221 HNS3_RX_PTYPE_UNUSED_ENTRY(102),
222 HNS3_RX_PTYPE_UNUSED_ENTRY(103),
223 HNS3_RX_PTYPE_UNUSED_ENTRY(104),
224 HNS3_RX_PTYPE_UNUSED_ENTRY(105),
225 HNS3_RX_PTYPE_UNUSED_ENTRY(106),
226 HNS3_RX_PTYPE_UNUSED_ENTRY(107),
227 HNS3_RX_PTYPE_UNUSED_ENTRY(108),
228 HNS3_RX_PTYPE_UNUSED_ENTRY(109),
229 HNS3_RX_PTYPE_UNUSED_ENTRY(110),
230 HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
231 HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
232 HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
233 HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
234 HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
235 HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
236 HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
237 HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
238 HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
239 HNS3_RX_PTYPE_UNUSED_ENTRY(120),
240 HNS3_RX_PTYPE_UNUSED_ENTRY(121),
241 HNS3_RX_PTYPE_UNUSED_ENTRY(122),
242 HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
243 HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
244 HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
245 HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
246 HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
247 HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
248 HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
249 HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
250 HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
251 HNS3_RX_PTYPE_UNUSED_ENTRY(132),
252 HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
253 HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
254 HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
255 HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
256 HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
257 HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
258 HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
259 HNS3_RX_PTYPE_UNUSED_ENTRY(140),
260 HNS3_RX_PTYPE_UNUSED_ENTRY(141),
261 HNS3_RX_PTYPE_UNUSED_ENTRY(142),
262 HNS3_RX_PTYPE_UNUSED_ENTRY(143),
263 HNS3_RX_PTYPE_UNUSED_ENTRY(144),
264 HNS3_RX_PTYPE_UNUSED_ENTRY(145),
265 HNS3_RX_PTYPE_UNUSED_ENTRY(146),
266 HNS3_RX_PTYPE_UNUSED_ENTRY(147),
267 HNS3_RX_PTYPE_UNUSED_ENTRY(148),
268 HNS3_RX_PTYPE_UNUSED_ENTRY(149),
269 HNS3_RX_PTYPE_UNUSED_ENTRY(150),
270 HNS3_RX_PTYPE_UNUSED_ENTRY(151),
271 HNS3_RX_PTYPE_UNUSED_ENTRY(152),
272 HNS3_RX_PTYPE_UNUSED_ENTRY(153),
273 HNS3_RX_PTYPE_UNUSED_ENTRY(154),
274 HNS3_RX_PTYPE_UNUSED_ENTRY(155),
275 HNS3_RX_PTYPE_UNUSED_ENTRY(156),
276 HNS3_RX_PTYPE_UNUSED_ENTRY(157),
277 HNS3_RX_PTYPE_UNUSED_ENTRY(158),
278 HNS3_RX_PTYPE_UNUSED_ENTRY(159),
279 HNS3_RX_PTYPE_UNUSED_ENTRY(160),
280 HNS3_RX_PTYPE_UNUSED_ENTRY(161),
281 HNS3_RX_PTYPE_UNUSED_ENTRY(162),
282 HNS3_RX_PTYPE_UNUSED_ENTRY(163),
283 HNS3_RX_PTYPE_UNUSED_ENTRY(164),
284 HNS3_RX_PTYPE_UNUSED_ENTRY(165),
285 HNS3_RX_PTYPE_UNUSED_ENTRY(166),
286 HNS3_RX_PTYPE_UNUSED_ENTRY(167),
287 HNS3_RX_PTYPE_UNUSED_ENTRY(168),
288 HNS3_RX_PTYPE_UNUSED_ENTRY(169),
289 HNS3_RX_PTYPE_UNUSED_ENTRY(170),
290 HNS3_RX_PTYPE_UNUSED_ENTRY(171),
291 HNS3_RX_PTYPE_UNUSED_ENTRY(172),
292 HNS3_RX_PTYPE_UNUSED_ENTRY(173),
293 HNS3_RX_PTYPE_UNUSED_ENTRY(174),
294 HNS3_RX_PTYPE_UNUSED_ENTRY(175),
295 HNS3_RX_PTYPE_UNUSED_ENTRY(176),
296 HNS3_RX_PTYPE_UNUSED_ENTRY(177),
297 HNS3_RX_PTYPE_UNUSED_ENTRY(178),
298 HNS3_RX_PTYPE_UNUSED_ENTRY(179),
299 HNS3_RX_PTYPE_UNUSED_ENTRY(180),
300 HNS3_RX_PTYPE_UNUSED_ENTRY(181),
301 HNS3_RX_PTYPE_UNUSED_ENTRY(182),
302 HNS3_RX_PTYPE_UNUSED_ENTRY(183),
303 HNS3_RX_PTYPE_UNUSED_ENTRY(184),
304 HNS3_RX_PTYPE_UNUSED_ENTRY(185),
305 HNS3_RX_PTYPE_UNUSED_ENTRY(186),
306 HNS3_RX_PTYPE_UNUSED_ENTRY(187),
307 HNS3_RX_PTYPE_UNUSED_ENTRY(188),
308 HNS3_RX_PTYPE_UNUSED_ENTRY(189),
309 HNS3_RX_PTYPE_UNUSED_ENTRY(190),
310 HNS3_RX_PTYPE_UNUSED_ENTRY(191),
311 HNS3_RX_PTYPE_UNUSED_ENTRY(192),
312 HNS3_RX_PTYPE_UNUSED_ENTRY(193),
313 HNS3_RX_PTYPE_UNUSED_ENTRY(194),
314 HNS3_RX_PTYPE_UNUSED_ENTRY(195),
315 HNS3_RX_PTYPE_UNUSED_ENTRY(196),
316 HNS3_RX_PTYPE_UNUSED_ENTRY(197),
317 HNS3_RX_PTYPE_UNUSED_ENTRY(198),
318 HNS3_RX_PTYPE_UNUSED_ENTRY(199),
319 HNS3_RX_PTYPE_UNUSED_ENTRY(200),
320 HNS3_RX_PTYPE_UNUSED_ENTRY(201),
321 HNS3_RX_PTYPE_UNUSED_ENTRY(202),
322 HNS3_RX_PTYPE_UNUSED_ENTRY(203),
323 HNS3_RX_PTYPE_UNUSED_ENTRY(204),
324 HNS3_RX_PTYPE_UNUSED_ENTRY(205),
325 HNS3_RX_PTYPE_UNUSED_ENTRY(206),
326 HNS3_RX_PTYPE_UNUSED_ENTRY(207),
327 HNS3_RX_PTYPE_UNUSED_ENTRY(208),
328 HNS3_RX_PTYPE_UNUSED_ENTRY(209),
329 HNS3_RX_PTYPE_UNUSED_ENTRY(210),
330 HNS3_RX_PTYPE_UNUSED_ENTRY(211),
331 HNS3_RX_PTYPE_UNUSED_ENTRY(212),
332 HNS3_RX_PTYPE_UNUSED_ENTRY(213),
333 HNS3_RX_PTYPE_UNUSED_ENTRY(214),
334 HNS3_RX_PTYPE_UNUSED_ENTRY(215),
335 HNS3_RX_PTYPE_UNUSED_ENTRY(216),
336 HNS3_RX_PTYPE_UNUSED_ENTRY(217),
337 HNS3_RX_PTYPE_UNUSED_ENTRY(218),
338 HNS3_RX_PTYPE_UNUSED_ENTRY(219),
339 HNS3_RX_PTYPE_UNUSED_ENTRY(220),
340 HNS3_RX_PTYPE_UNUSED_ENTRY(221),
341 HNS3_RX_PTYPE_UNUSED_ENTRY(222),
342 HNS3_RX_PTYPE_UNUSED_ENTRY(223),
343 HNS3_RX_PTYPE_UNUSED_ENTRY(224),
344 HNS3_RX_PTYPE_UNUSED_ENTRY(225),
345 HNS3_RX_PTYPE_UNUSED_ENTRY(226),
346 HNS3_RX_PTYPE_UNUSED_ENTRY(227),
347 HNS3_RX_PTYPE_UNUSED_ENTRY(228),
348 HNS3_RX_PTYPE_UNUSED_ENTRY(229),
349 HNS3_RX_PTYPE_UNUSED_ENTRY(230),
350 HNS3_RX_PTYPE_UNUSED_ENTRY(231),
351 HNS3_RX_PTYPE_UNUSED_ENTRY(232),
352 HNS3_RX_PTYPE_UNUSED_ENTRY(233),
353 HNS3_RX_PTYPE_UNUSED_ENTRY(234),
354 HNS3_RX_PTYPE_UNUSED_ENTRY(235),
355 HNS3_RX_PTYPE_UNUSED_ENTRY(236),
356 HNS3_RX_PTYPE_UNUSED_ENTRY(237),
357 HNS3_RX_PTYPE_UNUSED_ENTRY(238),
358 HNS3_RX_PTYPE_UNUSED_ENTRY(239),
359 HNS3_RX_PTYPE_UNUSED_ENTRY(240),
360 HNS3_RX_PTYPE_UNUSED_ENTRY(241),
361 HNS3_RX_PTYPE_UNUSED_ENTRY(242),
362 HNS3_RX_PTYPE_UNUSED_ENTRY(243),
363 HNS3_RX_PTYPE_UNUSED_ENTRY(244),
364 HNS3_RX_PTYPE_UNUSED_ENTRY(245),
365 HNS3_RX_PTYPE_UNUSED_ENTRY(246),
366 HNS3_RX_PTYPE_UNUSED_ENTRY(247),
367 HNS3_RX_PTYPE_UNUSED_ENTRY(248),
368 HNS3_RX_PTYPE_UNUSED_ENTRY(249),
369 HNS3_RX_PTYPE_UNUSED_ENTRY(250),
370 HNS3_RX_PTYPE_UNUSED_ENTRY(251),
371 HNS3_RX_PTYPE_UNUSED_ENTRY(252),
372 HNS3_RX_PTYPE_UNUSED_ENTRY(253),
373 HNS3_RX_PTYPE_UNUSED_ENTRY(254),
374 HNS3_RX_PTYPE_UNUSED_ENTRY(255),
};

#define HNS3_INVALID_PTYPE \
		ARRAY_SIZE(hns3_rx_ptype_tbl)
static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule_irqoff(&tqp_vector->napi);
	tqp_vector->event_cnt++;

	return IRQ_HANDLED;
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity mask */
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}
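
/* Request an IRQ for each TQP vector that has rings, name it after the
 * vector's ring types (TxRx/Rx/Tx), and set its CPU affinity hint. The
 * IRQ is left disabled (IRQ_NOAUTOEN) until the vector is enabled.
 */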
410 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
412 struct hns3_enet_tqp_vector *tqp_vectors;
413 int txrx_int_idx = 0;
419 for (i = 0; i < priv->vector_num; i++) {
420 tqp_vectors = &priv->tqp_vector[i];
422 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
425 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
426 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
427 "%s-%s-%s-%d", hns3_driver_name,
428 pci_name(priv->ae_handle->pdev),
429 "TxRx", txrx_int_idx++);
431 } else if (tqp_vectors->rx_group.ring) {
432 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
433 "%s-%s-%s-%d", hns3_driver_name,
434 pci_name(priv->ae_handle->pdev),
436 } else if (tqp_vectors->tx_group.ring) {
437 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
438 "%s-%s-%s-%d", hns3_driver_name,
439 pci_name(priv->ae_handle->pdev),
442 /* Skip this unused q_vector */
446 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
448 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
449 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
450 tqp_vectors->name, tqp_vectors);
452 netdev_err(priv->netdev, "request irq(%d) fail\n",
453 tqp_vectors->vector_irq);
454 hns3_nic_uninit_irq(priv);
458 irq_set_affinity_hint(tqp_vectors->vector_irq,
459 &tqp_vectors->affinity_mask);
461 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
467 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
470 writel(mask_en, tqp_vector->mask_addr);
473 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
475 napi_enable(&tqp_vector->napi);
476 enable_irq(tqp_vector->vector_irq);
479 hns3_mask_vector_irq(tqp_vector, 1);
482 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
485 hns3_mask_vector_irq(tqp_vector, 0);
487 disable_irq(tqp_vector->vector_irq);
488 napi_disable(&tqp_vector->napi);
489 cancel_work_sync(&tqp_vector->rx_group.dim.work);
490 cancel_work_sync(&tqp_vector->tx_group.dim.work);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* this defines the configuration for RL (Interrupt Rate Limiter).
	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
	 * GL and RL(Rate Limiter) are 2 ways to achieve interrupt coalescing
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
	    !tqp_vector->rx_group.coal.adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}
512 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
517 if (tqp_vector->rx_group.coal.unit_1us)
518 new_val = gl_value | HNS3_INT_GL_1US;
520 new_val = hns3_gl_usec_to_reg(gl_value);
522 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
525 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
530 if (tqp_vector->tx_group.coal.unit_1us)
531 new_val = gl_value | HNS3_INT_GL_1US;
533 new_val = hns3_gl_usec_to_reg(gl_value);
535 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
538 void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
541 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
544 void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
547 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
550 static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
551 struct hns3_nic_priv *priv)
553 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
554 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
555 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
556 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
557 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
559 tx_coal->adapt_enable = ptx_coal->adapt_enable;
560 rx_coal->adapt_enable = prx_coal->adapt_enable;
562 tx_coal->int_gl = ptx_coal->int_gl;
563 rx_coal->int_gl = prx_coal->int_gl;
565 rx_coal->flow_level = prx_coal->flow_level;
566 tx_coal->flow_level = ptx_coal->flow_level;
	/* For device version V3 and above, GL can be configured in 1us
	 * units, so use the 1us unit.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
		tx_coal->unit_1us = 1;
		rx_coal->unit_1us = 1;
	}
	if (ae_dev->dev_specs.int_ql_max) {
		tx_coal->ql_enable = 1;
		rx_coal->ql_enable = 1;
		tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
		tx_coal->int_ql = ptx_coal->int_ql;
		rx_coal->int_ql = prx_coal->int_ql;
	}
}

static void
hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
			     struct hns3_nic_priv *priv)
{
590 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
591 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
592 struct hnae3_handle *h = priv->ae_handle;
594 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
595 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
596 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
598 if (tx_coal->ql_enable)
599 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
601 if (rx_coal->ql_enable)
602 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
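
/* Sync the netdev TC mapping and the real number of Tx/Rx queues with
 * the TQP/TC information stored in the hnae3 handle.
 */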
605 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
607 struct hnae3_handle *h = hns3_get_handle(netdev);
608 struct hnae3_knic_private_info *kinfo = &h->kinfo;
609 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
610 unsigned int queue_size = kinfo->num_tqps;
613 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) {
614 netdev_reset_tc(netdev);
616 ret = netdev_set_num_tc(netdev, tc_info->num_tc);
619 "netdev_set_num_tc fail, ret=%d!\n", ret);
623 for (i = 0; i < tc_info->num_tc; i++)
624 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
625 tc_info->tqp_offset[i]);
628 ret = netif_set_real_num_tx_queues(netdev, queue_size);
631 "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
635 ret = netif_set_real_num_rx_queues(netdev, queue_size);
638 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
645 u16 hns3_get_max_available_channels(struct hnae3_handle *h)
647 u16 alloc_tqps, max_rss_size, rss_size;
649 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
650 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc;
652 return min_t(u16, rss_size, max_rss_size);
655 static void hns3_tqp_enable(struct hnae3_queue *tqp)
659 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
660 rcb_reg |= BIT(HNS3_RING_EN_B);
661 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
664 static void hns3_tqp_disable(struct hnae3_queue *tqp)
668 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
669 rcb_reg &= ~BIT(HNS3_RING_EN_B);
670 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
673 static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
675 #ifdef CONFIG_RFS_ACCEL
676 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
677 netdev->rx_cpu_rmap = NULL;
681 static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
683 #ifdef CONFIG_RFS_ACCEL
684 struct hns3_nic_priv *priv = netdev_priv(netdev);
685 struct hns3_enet_tqp_vector *tqp_vector;
688 if (!netdev->rx_cpu_rmap) {
689 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
690 if (!netdev->rx_cpu_rmap)
694 for (i = 0; i < priv->vector_num; i++) {
695 tqp_vector = &priv->tqp_vector[i];
696 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
697 tqp_vector->vector_irq);
699 hns3_free_rx_cpu_rmap(netdev);
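
/* Bring the data path up: reset all rings, enable the TQP vectors and
 * queues, then start the ae_dev, rolling back on failure.
 */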
707 static int hns3_nic_net_up(struct net_device *netdev)
709 struct hns3_nic_priv *priv = netdev_priv(netdev);
710 struct hnae3_handle *h = priv->ae_handle;
714 ret = hns3_nic_reset_all_ring(h);
718 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
720 /* enable the vectors */
721 for (i = 0; i < priv->vector_num; i++)
722 hns3_vector_enable(&priv->tqp_vector[i]);
725 for (j = 0; j < h->kinfo.num_tqps; j++)
726 hns3_tqp_enable(h->kinfo.tqp[j]);
728 /* start the ae_dev */
729 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
731 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
733 hns3_tqp_disable(h->kinfo.tqp[j]);
735 for (j = i - 1; j >= 0; j--)
736 hns3_vector_disable(&priv->tqp_vector[j]);
742 static void hns3_config_xps(struct hns3_nic_priv *priv)
746 for (i = 0; i < priv->vector_num; i++) {
747 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
748 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
753 ret = netif_set_xps_queue(priv->netdev,
754 &tqp_vector->affinity_mask,
755 ring->tqp->tqp_index);
757 netdev_warn(priv->netdev,
758 "set xps queue failed: %d", ret);
765 static int hns3_nic_net_open(struct net_device *netdev)
767 struct hns3_nic_priv *priv = netdev_priv(netdev);
768 struct hnae3_handle *h = hns3_get_handle(netdev);
769 struct hnae3_knic_private_info *kinfo;
772 if (hns3_nic_resetting(netdev))
775 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
776 netdev_warn(netdev, "net open repeatedly!\n");
780 netif_carrier_off(netdev);
782 ret = hns3_nic_set_real_num_queue(netdev);
786 ret = hns3_nic_net_up(netdev);
788 netdev_err(netdev, "net up fail, ret=%d!\n", ret);
793 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
794 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]);
796 if (h->ae_algo->ops->set_timer_task)
797 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
799 hns3_config_xps(priv);
801 netif_dbg(h, drv, netdev, "net open\n");
806 static void hns3_reset_tx_queue(struct hnae3_handle *h)
808 struct net_device *ndev = h->kinfo.netdev;
809 struct hns3_nic_priv *priv = netdev_priv(ndev);
810 struct netdev_queue *dev_queue;
813 for (i = 0; i < h->kinfo.num_tqps; i++) {
814 dev_queue = netdev_get_tx_queue(ndev,
815 priv->ring[i].queue_index);
816 netdev_tx_reset_queue(dev_queue);
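
/* Tear the data path down: disable the TQP vectors and queues, stop the
 * ae_dev and clear the rings unless a reset is in progress.
 */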
820 static void hns3_nic_net_down(struct net_device *netdev)
822 struct hns3_nic_priv *priv = netdev_priv(netdev);
823 struct hnae3_handle *h = hns3_get_handle(netdev);
824 const struct hnae3_ae_ops *ops;
827 /* disable vectors */
828 for (i = 0; i < priv->vector_num; i++)
829 hns3_vector_disable(&priv->tqp_vector[i]);
832 for (i = 0; i < h->kinfo.num_tqps; i++)
833 hns3_tqp_disable(h->kinfo.tqp[i]);
836 ops = priv->ae_handle->ae_algo->ops;
838 ops->stop(priv->ae_handle);
	/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
	 * during reset process, because driver may not be able
	 * to disable the ring through firmware when downing the netdev.
	 */
844 if (!hns3_nic_resetting(netdev))
845 hns3_clear_all_ring(priv->ae_handle, false);
847 hns3_reset_tx_queue(priv->ae_handle);
850 static int hns3_nic_net_stop(struct net_device *netdev)
852 struct hns3_nic_priv *priv = netdev_priv(netdev);
853 struct hnae3_handle *h = hns3_get_handle(netdev);
855 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
858 netif_dbg(h, drv, netdev, "net stop\n");
860 if (h->ae_algo->ops->set_timer_task)
861 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
863 netif_carrier_off(netdev);
864 netif_tx_disable(netdev);
866 hns3_nic_net_down(netdev);
871 static int hns3_nic_uc_sync(struct net_device *netdev,
872 const unsigned char *addr)
874 struct hnae3_handle *h = hns3_get_handle(netdev);
876 if (h->ae_algo->ops->add_uc_addr)
877 return h->ae_algo->ops->add_uc_addr(h, addr);
882 static int hns3_nic_uc_unsync(struct net_device *netdev,
883 const unsigned char *addr)
885 struct hnae3_handle *h = hns3_get_handle(netdev);
	/* need ignore the request of removing device address, because
	 * we store the device address and other addresses of uc list
	 * in the function's mac filter list.
	 */
891 if (ether_addr_equal(addr, netdev->dev_addr))
894 if (h->ae_algo->ops->rm_uc_addr)
895 return h->ae_algo->ops->rm_uc_addr(h, addr);
900 static int hns3_nic_mc_sync(struct net_device *netdev,
901 const unsigned char *addr)
903 struct hnae3_handle *h = hns3_get_handle(netdev);
905 if (h->ae_algo->ops->add_mc_addr)
906 return h->ae_algo->ops->add_mc_addr(h, addr);
911 static int hns3_nic_mc_unsync(struct net_device *netdev,
912 const unsigned char *addr)
914 struct hnae3_handle *h = hns3_get_handle(netdev);
916 if (h->ae_algo->ops->rm_mc_addr)
917 return h->ae_algo->ops->rm_mc_addr(h, addr);
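
/* Translate the netdev promisc/allmulti flags into HNAE3 promiscuous
 * mode bits.
 */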
922 static u8 hns3_get_netdev_flags(struct net_device *netdev)
926 if (netdev->flags & IFF_PROMISC)
927 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
928 else if (netdev->flags & IFF_ALLMULTI)
929 flags = HNAE3_USER_MPE;
934 static void hns3_nic_set_rx_mode(struct net_device *netdev)
936 struct hnae3_handle *h = hns3_get_handle(netdev);
939 new_flags = hns3_get_netdev_flags(netdev);
941 __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
942 __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
	/* User mode Promisc mode enable and vlan filtering is disabled to
	 * let all packets in.
	 */
947 h->netdev_flags = new_flags;
948 hns3_request_update_promisc_mode(h);
951 void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
953 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
955 if (ops->request_update_promisc_mode)
956 ops->request_update_promisc_mode(handle);
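
/* The tx spare buffer is a per-ring area used for the bounce and SGL tx
 * paths; the helpers below manage it as a ring with next_to_use and
 * next_to_clean indexes.
 */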
959 static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
961 struct hns3_tx_spare *tx_spare = ring->tx_spare;
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hns3_tx_spare_update() called in tx desc cleaning process.
	 */
	ntc = smp_load_acquire(&tx_spare->last_to_clean);
968 ntu = tx_spare->next_to_use;
	if (ntc > ntu)
		return ntc - ntu - 1;

	/* The free tx buffer is divided into two parts, so pick the
	 * longer one.
	 */
	return max(ntc, tx_spare->len - ntu) - 1;
}
979 static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
981 struct hns3_tx_spare *tx_spare = ring->tx_spare;
	if (!tx_spare ||
	    tx_spare->last_to_clean == tx_spare->next_to_clean)
		return;

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hns3_tx_spare_space() called in xmit process.
	 */
	smp_store_release(&tx_spare->last_to_clean,
			  tx_spare->next_to_clean);
}
994 static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
998 u32 len = skb->len <= ring->tx_copybreak ? skb->len :
1001 if (len > ring->tx_copybreak)
1004 if (ALIGN(len, dma_get_cache_alignment()) > space) {
1005 hns3_ring_stats_update(ring, tx_spare_full);
1012 static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
1013 struct sk_buff *skb,
1016 if (skb->len <= ring->tx_copybreak || !tx_sgl ||
1017 (!skb_has_frag_list(skb) &&
1018 skb_shinfo(skb)->nr_frags < tx_sgl))
1021 if (space < HNS3_MAX_SGL_SIZE) {
1022 hns3_ring_stats_update(ring, tx_spare_full);
1029 static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
1031 u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
1032 struct hns3_tx_spare *tx_spare;
1040 order = get_order(alloc_size);
1041 if (order >= MAX_ORDER) {
1042 if (net_ratelimit())
1043 dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
1047 tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
1050 /* The driver still work without the tx spare buffer */
1051 dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
1052 goto devm_kzalloc_error;
1055 page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
1058 dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
1059 goto alloc_pages_error;
1062 dma = dma_map_page(ring_to_dev(ring), page, 0,
1063 PAGE_SIZE << order, DMA_TO_DEVICE);
1064 if (dma_mapping_error(ring_to_dev(ring), dma)) {
1065 dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
1066 goto dma_mapping_error;
1069 tx_spare->dma = dma;
1070 tx_spare->buf = page_address(page);
1071 tx_spare->len = PAGE_SIZE << order;
1072 ring->tx_spare = tx_spare;
1078 devm_kfree(ring_to_dev(ring), tx_spare);
1080 ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
/* Use hns3_tx_spare_space() to make sure there is enough buffer
 * before calling below function to allocate tx buffer.
 */
1086 static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
1087 unsigned int size, dma_addr_t *dma,
1090 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1091 u32 ntu = tx_spare->next_to_use;
1093 size = ALIGN(size, dma_get_cache_alignment());
1096 /* Tx spare buffer wraps back here because the end of
1097 * freed tx buffer is not enough.
1099 if (ntu + size > tx_spare->len) {
1100 *cb_len += (tx_spare->len - ntu);
1104 tx_spare->next_to_use = ntu + size;
1105 if (tx_spare->next_to_use == tx_spare->len)
1106 tx_spare->next_to_use = 0;
1108 *dma = tx_spare->dma + ntu;
1110 return tx_spare->buf + ntu;
1113 static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
1115 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1117 if (len > tx_spare->next_to_use) {
1118 len -= tx_spare->next_to_use;
1119 tx_spare->next_to_use = tx_spare->len - len;
1121 tx_spare->next_to_use -= len;
1125 static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
1126 struct hns3_desc_cb *cb)
1128 struct hns3_tx_spare *tx_spare = ring->tx_spare;
1129 u32 ntc = tx_spare->next_to_clean;
1130 u32 len = cb->length;
1132 tx_spare->next_to_clean += len;
1134 if (tx_spare->next_to_clean >= tx_spare->len) {
1135 tx_spare->next_to_clean -= tx_spare->len;
1137 if (tx_spare->next_to_clean) {
1139 len = tx_spare->next_to_clean;
	/* This tx spare buffer is only really reclaimed after calling
	 * hns3_tx_spare_update(), so it is still safe to use the info in
	 * the tx buffer to do the dma sync or sg unmapping after
	 * tx_spare->next_to_clean is moved forward.
	 */
1148 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
1149 dma_addr_t dma = tx_spare->dma + ntc;
1151 dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
1154 struct sg_table *sgt = tx_spare->buf + ntc;
1156 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
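
/* Prepare TSO: fix up the (inner) L3/L4 headers, compute the header
 * length and fill the paylen/MSS related descriptor fields.
 */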
1161 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
1162 u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
1164 u32 l4_offset, hdr_len;
1165 union l3_hdr_info l3;
1166 union l4_hdr_info l4;
1170 if (!skb_is_gso(skb))
1173 ret = skb_cow_head(skb, 0);
1174 if (unlikely(ret < 0))
1177 l3.hdr = skb_network_header(skb);
1178 l4.hdr = skb_transport_header(skb);
	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;
1187 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1189 SKB_GSO_UDP_TUNNEL |
1190 SKB_GSO_UDP_TUNNEL_CSUM)) {
1191 /* reset l3&l4 pointers from outer to inner headers */
1192 l3.hdr = skb_inner_network_header(skb);
1193 l4.hdr = skb_inner_transport_header(skb);
		/* Software should clear the IPv4's checksum field when
		 * tunnel packet is TSO
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
1202 /* normal or tunnel packet */
1203 l4_offset = l4.hdr - skb->data;
1205 /* remove payload length from inner pseudo checksum when tso */
1206 l4_paylen = skb->len - l4_offset;
1208 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1209 hdr_len = sizeof(*l4.udp) + l4_offset;
1210 csum_replace_by_diff(&l4.udp->check,
1211 (__force __wsum)htonl(l4_paylen));
1213 hdr_len = (l4.tcp->doff << 2) + l4_offset;
1214 csum_replace_by_diff(&l4.tcp->check,
1215 (__force __wsum)htonl(l4_paylen));
1218 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;
1220 /* find the txbd field values */
1221 *paylen_fdop_ol4cs = skb->len - hdr_len;
1222 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
1224 /* offload outer UDP header checksum */
1225 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1226 hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
1228 /* get MSS for TSO */
1229 *mss = skb_shinfo(skb)->gso_size;
1231 trace_hns3_tso(skb);
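
/* Extract the outer and inner L4 protocol numbers, walking IPv6
 * extension headers when necessary.
 */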
1236 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
1239 union l3_hdr_info l3;
1240 unsigned char *l4_hdr;
1241 unsigned char *exthdr;
1245 /* find outer header point */
1246 l3.hdr = skb_network_header(skb);
1247 l4_hdr = skb_transport_header(skb);
1249 if (skb->protocol == htons(ETH_P_IPV6)) {
1250 exthdr = l3.hdr + sizeof(*l3.v6);
1251 l4_proto_tmp = l3.v6->nexthdr;
1252 if (l4_hdr != exthdr)
1253 ipv6_skip_exthdr(skb, exthdr - skb->data,
1254 &l4_proto_tmp, &frag_off);
1255 } else if (skb->protocol == htons(ETH_P_IP)) {
1256 l4_proto_tmp = l3.v4->protocol;
1261 *ol4_proto = l4_proto_tmp;
1264 if (!skb->encapsulation) {
1269 /* find inner header point */
1270 l3.hdr = skb_inner_network_header(skb);
1271 l4_hdr = skb_inner_transport_header(skb);
1273 if (l3.v6->version == 6) {
1274 exthdr = l3.hdr + sizeof(*l3.v6);
1275 l4_proto_tmp = l3.v6->nexthdr;
1276 if (l4_hdr != exthdr)
1277 ipv6_skip_exthdr(skb, exthdr - skb->data,
1278 &l4_proto_tmp, &frag_off);
1279 } else if (l3.v4->version == 4) {
1280 l4_proto_tmp = l3.v4->protocol;
1283 *il4_proto = l4_proto_tmp;
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is udp packet, which has a dest port as the IANA assigned.
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when udp dest port is
 * 4789, 4790 or 6081.
 */
1294 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
1296 struct hns3_nic_priv *priv = netdev_priv(skb->dev);
1297 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
1298 union l4_hdr_info l4;
	/* For device version V3 and above, the hardware can do this
	 * checksum offload.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return false;
1306 l4.hdr = skb_transport_header(skb);
	if (!(!skb->encapsulation &&
	      (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
	       l4.udp->dest == htons(GENEVE_UDP_PORT) ||
	       l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
		return false;

	return true;
}
1317 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
1318 u32 *ol_type_vlan_len_msec)
1320 u32 l2_len, l3_len, l4_len;
1321 unsigned char *il2_hdr;
1322 union l3_hdr_info l3;
1323 union l4_hdr_info l4;
1325 l3.hdr = skb_network_header(skb);
1326 l4.hdr = skb_transport_header(skb);
1328 /* compute OL2 header size, defined in 2 Bytes */
1329 l2_len = l3.hdr - skb->data;
1330 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
1332 /* compute OL3 header size, defined in 4 Bytes */
1333 l3_len = l4.hdr - l3.hdr;
1334 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
1336 il2_hdr = skb_inner_mac_header(skb);
1337 /* compute OL4 header size, defined in 4 Bytes */
1338 l4_len = il2_hdr - l4.hdr;
1339 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
1341 /* define outer network header type */
1342 if (skb->protocol == htons(ETH_P_IP)) {
1343 if (skb_is_gso(skb))
1344 hns3_set_field(*ol_type_vlan_len_msec,
1346 HNS3_OL3T_IPV4_CSUM);
1348 hns3_set_field(*ol_type_vlan_len_msec,
1350 HNS3_OL3T_IPV4_NO_CSUM);
1351 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1352 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
1356 if (ol4_proto == IPPROTO_UDP)
1357 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
1358 HNS3_TUN_MAC_IN_UDP);
1359 else if (ol4_proto == IPPROTO_GRE)
1360 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
1364 static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
1365 u32 *type_cs_vlan_tso)
1367 if (l3.v4->version == 4) {
1368 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
1374 if (skb_is_gso(skb))
1375 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
1376 } else if (l3.v6->version == 6) {
1377 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
1382 static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
1383 u32 l4_proto, u32 *type_cs_vlan_tso)
1385 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
1388 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1389 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1391 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1395 if (hns3_tunnel_csum_bug(skb)) {
1396 int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
1398 return ret ? ret : skb_checksum_help(skb);
1401 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1402 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1404 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1405 (sizeof(struct udphdr) >> 2));
1408 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
1409 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
1411 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
1412 (sizeof(struct sctphdr) >> 2));
		/* drop the skb tunnel packet if hardware doesn't support,
		 * because hardware can't calculate csum when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * driver calculate l4 checksum when not TSO.
		 */
		return skb_checksum_help(skb);
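
/* Fill the L2/L3/L4 length and checksum fields of the tx descriptor for
 * both plain and tunneled packets.
 */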
1430 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
1431 u8 il4_proto, u32 *type_cs_vlan_tso,
1432 u32 *ol_type_vlan_len_msec)
1434 unsigned char *l2_hdr = skb->data;
1435 u32 l4_proto = ol4_proto;
1436 union l4_hdr_info l4;
1437 union l3_hdr_info l3;
1440 l4.hdr = skb_transport_header(skb);
1441 l3.hdr = skb_network_header(skb);
1443 /* handle encapsulation skb */
1444 if (skb->encapsulation) {
		/* If this is not a UDP/GRE encapsulation skb */
		if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * driver calculate l4 checksum when not TSO.
			 */
			return skb_checksum_help(skb);
		}
1459 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
1461 /* switch to inner header */
1462 l2_hdr = skb_inner_mac_header(skb);
1463 l3.hdr = skb_inner_network_header(skb);
1464 l4.hdr = skb_inner_transport_header(skb);
1465 l4_proto = il4_proto;
1468 hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
1470 /* compute inner(/normal) L2 header size, defined in 2 Bytes */
1471 l2_len = l3.hdr - l2_hdr;
1472 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
1474 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
1475 l3_len = l4.hdr - l3.hdr;
1476 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
1478 return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
1481 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
1482 struct sk_buff *skb)
1484 struct hnae3_handle *handle = tx_ring->tqp->handle;
1485 struct hnae3_ae_dev *ae_dev;
1486 struct vlan_ethhdr *vhdr;
1489 if (!(skb->protocol == htons(ETH_P_8021Q) ||
1490 skb_vlan_tag_present(skb)))
	/* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert
	 * VLAN enabled, only one VLAN header is allowed in skb, otherwise it
	 * will cause RAS error.
	 */
1497 ae_dev = pci_get_drvdata(handle->pdev);
1498 if (unlikely(skb_vlan_tagged_multi(skb) &&
1499 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
1500 handle->port_base_vlan_state ==
1501 HNAE3_PORT_BASE_VLAN_ENABLE))
1504 if (skb->protocol == htons(ETH_P_8021Q) &&
1505 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just need to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}
1514 if (skb_vlan_tag_present(skb)) {
		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
1518 if (skb->protocol == htons(ETH_P_8021Q) &&
1519 handle->port_base_vlan_state ==
1520 HNAE3_PORT_BASE_VLAN_DISABLE)
1521 rc = HNS3_OUTER_VLAN_TAG;
1523 rc = HNS3_INNER_VLAN_TAG;
1525 skb->protocol = vlan_get_protocol(skb);
1529 rc = skb_cow_head(skb, 0);
1530 if (unlikely(rc < 0))
1533 vhdr = (struct vlan_ethhdr *)skb->data;
1534 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
1537 skb->protocol = vlan_get_protocol(skb);
1541 /* check if the hardware is capable of checksum offloading */
1542 static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
1544 struct hns3_nic_priv *priv = netdev_priv(skb->dev);
	/* Kindly note, due to backward compatibility of the TX descriptor,
	 * HW checksum of the non-IP packets and GSO packets is handled at
	 * different place in the following code
	 */
1550 if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
1551 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
1557 struct hns3_desc_param {
1559 u32 ol_type_vlan_len_msec;
1560 u32 type_cs_vlan_tso;
1566 static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
1568 pa->paylen_ol4cs = skb->len;
1569 pa->ol_type_vlan_len_msec = 0;
1570 pa->type_cs_vlan_tso = 0;
1571 pa->mss_hw_csum = 0;
1576 static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
1577 struct sk_buff *skb,
1578 struct hns3_desc_param *param)
1582 ret = hns3_handle_vtags(ring, skb);
1583 if (unlikely(ret < 0)) {
1584 hns3_ring_stats_update(ring, tx_vlan_err);
1586 } else if (ret == HNS3_INNER_VLAN_TAG) {
1587 param->inner_vtag = skb_vlan_tag_get(skb);
1588 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1590 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
1591 } else if (ret == HNS3_OUTER_VLAN_TAG) {
1592 param->out_vtag = skb_vlan_tag_get(skb);
1593 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
1595 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1601 static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
1602 struct sk_buff *skb,
1603 struct hns3_desc_cb *desc_cb,
1604 struct hns3_desc_param *param)
1606 u8 ol4_proto, il4_proto;
1609 if (hns3_check_hw_tx_csum(skb)) {
1610 /* set checksum start and offset, defined in 2 Bytes */
1611 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
1612 skb_checksum_start_offset(skb) >> 1);
1613 hns3_set_field(param->ol_type_vlan_len_msec,
1614 HNS3_TXD_CSUM_OFFSET_S,
1615 skb->csum_offset >> 1);
1616 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
1620 skb_reset_mac_len(skb);
1622 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1623 if (unlikely(ret < 0)) {
1624 hns3_ring_stats_update(ring, tx_l4_proto_err);
1628 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1629 ¶m->type_cs_vlan_tso,
1630 ¶m->ol_type_vlan_len_msec);
1631 if (unlikely(ret < 0)) {
1632 hns3_ring_stats_update(ring, tx_l2l3l4_err);
1636 ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum,
1637 ¶m->type_cs_vlan_tso, &desc_cb->send_bytes);
1638 if (unlikely(ret < 0)) {
1639 hns3_ring_stats_update(ring, tx_tso_err);
1645 static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
1646 struct sk_buff *skb, struct hns3_desc *desc,
1647 struct hns3_desc_cb *desc_cb)
1649 struct hns3_desc_param param;
1652 hns3_init_desc_data(skb, ¶m);
1653 ret = hns3_handle_vlan_info(ring, skb, ¶m);
1654 if (unlikely(ret < 0))
1657 desc_cb->send_bytes = skb->len;
1659 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1660 ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m);
1666 desc->tx.ol_type_vlan_len_msec =
1667 cpu_to_le32(param.ol_type_vlan_len_msec);
1668 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
1669 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
1670 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
1671 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
1672 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
1677 static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
1680 #define HNS3_LIKELY_BD_NUM 1
1682 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1683 unsigned int frag_buf_num;
1686 if (likely(size <= HNS3_MAX_BD_SIZE)) {
1687 desc->addr = cpu_to_le64(dma);
1688 desc->tx.send_size = cpu_to_le16(size);
1689 desc->tx.bdtp_fe_sc_vld_ra_ri =
1690 cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1692 trace_hns3_tx_desc(ring, ring->next_to_use);
1693 ring_ptr_move_fw(ring, next_to_use);
1694 return HNS3_LIKELY_BD_NUM;
1697 frag_buf_num = hns3_tx_bd_count(size);
1698 sizeoflast = size % HNS3_MAX_BD_SIZE;
1699 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1701 /* When frag size is bigger than hardware limit, split this frag */
1702 for (k = 0; k < frag_buf_num; k++) {
1703 /* now, fill the descriptor */
1704 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1705 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1706 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1707 desc->tx.bdtp_fe_sc_vld_ra_ri =
1708 cpu_to_le16(BIT(HNS3_TXD_VLD_B));
1710 trace_hns3_tx_desc(ring, ring->next_to_use);
1711 /* move ring pointer to next */
1712 ring_ptr_move_fw(ring, next_to_use);
1714 desc = &ring->desc[ring->next_to_use];
1717 return frag_buf_num;
1720 static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
1723 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1724 struct device *dev = ring_to_dev(ring);
1728 if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
1729 struct sk_buff *skb = (struct sk_buff *)priv;
1731 size = skb_headlen(skb);
1735 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1736 } else if (type & DESC_TYPE_BOUNCE_HEAD) {
1737 /* Head data has been filled in hns3_handle_tx_bounce(),
1738 * just return 0 here.
1742 skb_frag_t *frag = (skb_frag_t *)priv;
1744 size = skb_frag_size(frag);
1748 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1751 if (unlikely(dma_mapping_error(dev, dma))) {
1752 hns3_ring_stats_update(ring, sw_err_cnt);
1756 desc_cb->priv = priv;
1757 desc_cb->length = size;
1759 desc_cb->type = type;
1761 return hns3_fill_desc(ring, dma, size);
1764 static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1765 unsigned int bd_num)
1770 size = skb_headlen(skb);
1771 while (size > HNS3_MAX_BD_SIZE) {
1772 bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1773 size -= HNS3_MAX_BD_SIZE;
1775 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1780 bd_size[bd_num++] = size;
1781 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1785 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1786 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1787 size = skb_frag_size(frag);
1791 while (size > HNS3_MAX_BD_SIZE) {
1792 bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
1793 size -= HNS3_MAX_BD_SIZE;
1795 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1799 bd_size[bd_num++] = size;
1800 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1807 static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
1808 u8 max_non_tso_bd_num, unsigned int bd_num,
1809 unsigned int recursion_level)
1811 #define HNS3_MAX_RECURSION_LEVEL 24
1813 struct sk_buff *frag_skb;
1815 /* If the total len is within the max bd limit */
1816 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
1817 !skb_has_frag_list(skb) &&
1818 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
1819 return skb_shinfo(skb)->nr_frags + 1U;
1821 if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
1824 bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
1825 if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
1828 skb_walk_frags(skb, frag_skb) {
1829 bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
1830 bd_num, recursion_level + 1);
1831 if (bd_num > HNS3_MAX_TSO_BD_NUM)
1838 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1840 if (!skb->encapsulation)
1841 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1843 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
 * than MSS, we simplify it by ensuring skb_headlen + the first continuous
 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
 * than MSS except the last max_non_tso_bd_num - 1 frags.
 */
1852 static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
1853 unsigned int bd_num, u8 max_non_tso_bd_num)
1855 unsigned int tot_len = 0;
1858 for (i = 0; i < max_non_tso_bd_num - 1U; i++)
1859 tot_len += bd_size[i];
1861 /* ensure the first max_non_tso_bd_num frags is greater than
1864 if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
1865 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
1868 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
1869 * than mss except the last one.
1871 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
1872 tot_len -= bd_size[i];
1873 tot_len += bd_size[i + max_non_tso_bd_num - 1U];
1875 if (tot_len < skb_shinfo(skb)->gso_size)
1882 void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
1886 for (i = 0; i < MAX_SKB_FRAGS; i++)
1887 size[i] = skb_frag_size(&shinfo->frags[i]);
1890 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
1891 struct sk_buff *skb,
1892 unsigned int bd_num)
	/* 'bd_num == UINT_MAX' means the skb' fraglist has a
	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
	 */
1897 if (bd_num == UINT_MAX) {
1898 hns3_ring_stats_update(ring, over_max_recursion);
	/* The skb->len has exceeded the hw limitation, linearization
	 * will not help.
	 */
1905 if (skb->len > HNS3_MAX_TSO_SIZE ||
1906 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
1907 hns3_ring_stats_update(ring, hw_limitation);
1911 if (__skb_linearize(skb)) {
1912 hns3_ring_stats_update(ring, sw_err_cnt);
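
/* Check that the skb fits the BD limits (linearizing it if necessary)
 * and stop the subqueue when the ring does not have enough room.
 */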
1919 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1920 struct net_device *netdev,
1921 struct sk_buff *skb)
1923 struct hns3_nic_priv *priv = netdev_priv(netdev);
1924 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
1925 unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
1926 unsigned int bd_num;
1928 bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
1929 if (unlikely(bd_num > max_non_tso_bd_num)) {
1930 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
1931 !hns3_skb_need_linearized(skb, bd_size, bd_num,
1932 max_non_tso_bd_num)) {
1933 trace_hns3_over_max_bd(skb);
1937 if (hns3_skb_linearize(ring, skb, bd_num))
1940 bd_num = hns3_tx_bd_count(skb->len);
1942 hns3_ring_stats_update(ring, tx_copy);
1946 if (likely(ring_space(ring) >= bd_num))
1949 netif_stop_subqueue(netdev, ring->queue_index);
1950 smp_mb(); /* Memory barrier before checking ring_space */
	/* Start queue in case hns3_clean_tx_ring has just made room
	 * available and has not seen the queue stopped state performed
	 * by netif_stop_subqueue above.
	 */
1956 if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
1957 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
1958 netif_start_subqueue(netdev, ring->queue_index);
1962 hns3_ring_stats_update(ring, tx_busy);
1967 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1969 struct device *dev = ring_to_dev(ring);
1972 for (i = 0; i < ring->desc_num; i++) {
1973 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1974 struct hns3_desc_cb *desc_cb;
1976 memset(desc, 0, sizeof(*desc));
1978 /* check if this is where we started */
1979 if (ring->next_to_use == next_to_use_orig)
1983 ring_ptr_move_bw(ring, next_to_use);
1985 desc_cb = &ring->desc_cb[ring->next_to_use];
1990 /* unmap the descriptor dma address */
1991 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
1992 dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
1994 else if (desc_cb->type &
1995 (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
1996 hns3_tx_spare_rollback(ring, desc_cb->length);
1997 else if (desc_cb->length)
1998 dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
2001 desc_cb->length = 0;
2003 desc_cb->type = DESC_TYPE_UNKNOWN;
2007 static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
2008 struct sk_buff *skb, unsigned int type)
2010 struct sk_buff *frag_skb;
2011 int i, ret, bd_num = 0;
2013 ret = hns3_map_and_fill_desc(ring, skb, type);
2014 if (unlikely(ret < 0))
2019 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2020 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2022 ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
2023 if (unlikely(ret < 0))
2029 skb_walk_frags(skb, frag_skb) {
2030 ret = hns3_fill_skb_to_desc(ring, frag_skb,
2031 DESC_TYPE_FRAGLIST_SKB);
2032 if (unlikely(ret < 0))
2041 static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
2043 #define HNS3_BYTES_PER_64BIT 8
2045 struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();
2054 int idx = (ring->next_to_use - num + ring->desc_num) %
2057 u64_stats_update_begin(&ring->syncp);
2058 ring->stats.tx_push++;
2059 u64_stats_update_end(&ring->syncp);
2060 memcpy(&desc[offset], &ring->desc[idx],
2061 sizeof(struct hns3_desc));
2065 __iowrite64_copy(ring->tqp->mem_base, desc,
2066 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
2067 HNS3_BYTES_PER_64BIT);
2072 static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
2074 #define HNS3_MEM_DOORBELL_OFFSET 64
2076 __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
	/* make sure everything is visible to device before
	 * executing tx push or updating doorbell
	 */
	dma_wmb();
2083 __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
2085 u64_stats_update_begin(&ring->syncp);
2086 ring->stats.tx_mem_doorbell += ring->pending_buf;
2087 u64_stats_update_end(&ring->syncp);
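
/* Kick the hardware: use tx push for small packets when it is enabled,
 * otherwise ring the memory or MMIO doorbell for all pending BDs.
 */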
2092 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
2095 struct net_device *netdev = ring_to_netdev(ring);
2096 struct hns3_nic_priv *priv = netdev_priv(netdev);
	/* when tx push is enabled, the packet whose number of BD below
	 * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
	 */
2101 if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
2102 !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
2103 hns3_tx_push_bd(ring, num);
2104 WRITE_ONCE(ring->last_to_use, ring->next_to_use);
2108 ring->pending_buf += num;
2111 hns3_ring_stats_update(ring, tx_more);
2115 if (ring->tqp->mem_base)
2116 hns3_tx_mem_doorbell(ring);
2118 writel(ring->pending_buf,
2119 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
2121 ring->pending_buf = 0;
2122 WRITE_ONCE(ring->last_to_use, ring->next_to_use);
2125 static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
2126 struct hns3_desc *desc)
2128 struct hnae3_handle *h = hns3_get_handle(netdev);
2130 if (!(h->ae_algo->ops->set_tx_hwts_info &&
2131 h->ae_algo->ops->set_tx_hwts_info(h, skb)))
2134 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
2137 static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
2138 struct sk_buff *skb)
2140 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2141 unsigned int type = DESC_TYPE_BOUNCE_HEAD;
2142 unsigned int size = skb_headlen(skb);
2149 if (skb->len <= ring->tx_copybreak) {
2151 type = DESC_TYPE_BOUNCE_ALL;
2154 /* hns3_can_use_tx_bounce() has already been called to ensure that
2155 * the function below can always return a tx buffer.
2157 buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
2159 ret = skb_copy_bits(skb, 0, buf, size);
2160 if (unlikely(ret < 0)) {
2161 hns3_tx_spare_rollback(ring, cb_len);
2162 hns3_ring_stats_update(ring, copy_bits_err);
2166 desc_cb->priv = skb;
2167 desc_cb->length = cb_len;
2169 desc_cb->type = type;
2171 bd_num += hns3_fill_desc(ring, dma, size);
2173 if (type == DESC_TYPE_BOUNCE_HEAD) {
2174 ret = hns3_fill_skb_to_desc(ring, skb,
2175 DESC_TYPE_BOUNCE_HEAD);
2176 if (unlikely(ret < 0))
2182 dma_sync_single_for_device(ring_to_dev(ring), dma, size,
2185 hns3_ring_stats_update(ring, tx_bounce);
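/* Build a scatterlist in the tx spare buffer and map it with
 * dma_map_sg(), so a multi-frag skb needs only one mapping call, which
 * helps when an IOMMU is in use.
 */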
2190 static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
2191 struct sk_buff *skb)
2193 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2194 u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
2195 struct sg_table *sgt;
2201 if (skb_has_frag_list(skb))
2202 nfrag = HNS3_MAX_TSO_BD_NUM;
2204 /* hns3_can_use_tx_sgl() has already been called to ensure that
2205 * the function below can always return a tx buffer.
2207 sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
2210 /* the scatterlist immediately follows the sg table */
2211 sgt->sgl = (struct scatterlist *)(sgt + 1);
2212 sg_init_table(sgt->sgl, nfrag);
2213 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
2214 if (unlikely(nents < 0)) {
2215 hns3_tx_spare_rollback(ring, cb_len);
2216 hns3_ring_stats_update(ring, skb2sgl_err);
2220 sgt->orig_nents = nents;
2221 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
2223 if (unlikely(!sgt->nents)) {
2224 hns3_tx_spare_rollback(ring, cb_len);
2225 hns3_ring_stats_update(ring, map_sg_err);
2229 desc_cb->priv = skb;
2230 desc_cb->length = cb_len;
2232 desc_cb->type = DESC_TYPE_SGL_SKB;
2234 for (i = 0; i < sgt->nents; i++)
2235 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
2236 sg_dma_len(sgt->sgl + i));
2237 hns3_ring_stats_update(ring, tx_sgl);
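/* Choose the descriptor filling strategy: prefer the SGL or bounce path
 * when the tx spare buffer has enough space, otherwise map the skb
 * directly.
 */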
2242 static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
2243 struct sk_buff *skb)
2247 if (!ring->tx_spare)
2250 space = hns3_tx_spare_space(ring);
2252 if (hns3_can_use_tx_sgl(ring, skb, space))
2253 return hns3_handle_tx_sgl(ring, skb);
2255 if (hns3_can_use_tx_bounce(ring, skb, space))
2256 return hns3_handle_tx_bounce(ring, skb);
2259 return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
2262 static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
2263 struct sk_buff *skb,
2264 struct hns3_desc_cb *desc_cb,
2265 int next_to_use_head)
2269 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
2271 if (unlikely(ret < 0))
2274 /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
2275 * zero, which is unlikely, and 'ret > 0' is the number of tx
2276 * descriptors that need to be notified to the hw.
2278 ret = hns3_handle_desc_filling(ring, skb);
2279 if (likely(ret > 0))
2283 hns3_clear_desc(ring, next_to_use_head);
2287 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
2289 struct hns3_nic_priv *priv = netdev_priv(netdev);
2290 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
2291 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
2292 struct netdev_queue *dev_queue;
2296 /* Hardware can only handle frames longer than 32 bytes, so pad shorter ones */
2297 if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
2298 hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2300 hns3_ring_stats_update(ring, sw_err_cnt);
2302 return NETDEV_TX_OK;
2305 /* Prefetch the data used later */
2306 prefetch(skb->data);
2308 ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
2309 if (unlikely(ret <= 0)) {
2310 if (ret == -EBUSY) {
2311 hns3_tx_doorbell(ring, 0, true);
2312 return NETDEV_TX_BUSY;
2315 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
2319 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
2320 if (unlikely(ret <= 0))
2323 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
2324 (ring->desc_num - 1);
2326 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2327 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
2329 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
2330 cpu_to_le16(BIT(HNS3_TXD_FE_B));
2331 trace_hns3_tx_desc(ring, pre_ntu);
2333 skb_tx_timestamp(skb);
2335 /* Finished translating the whole packet into descriptors */
2336 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
2337 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
2338 netdev_xmit_more());
2339 hns3_tx_doorbell(ring, ret, doorbell);
2341 return NETDEV_TX_OK;
2344 dev_kfree_skb_any(skb);
2345 hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
2346 return NETDEV_TX_OK;
2349 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
2351 char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
2352 char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
2353 struct hnae3_handle *h = hns3_get_handle(netdev);
2354 struct sockaddr *mac_addr = p;
2357 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
2358 return -EADDRNOTAVAIL;
2360 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
2361 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
2362 netdev_info(netdev, "already using mac address %s\n",
2363 format_mac_addr_sa);
2367 /* For VF device, if there is a perm_addr, then the user will not
2368 * be allowed to change the address.
2370 if (!hns3_is_phys_func(h->pdev) &&
2371 !is_zero_ether_addr(netdev->perm_addr)) {
2372 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
2373 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
2374 netdev_err(netdev, "has permanent MAC %s, user MAC %s not allowed\n",
2375 format_mac_addr_perm, format_mac_addr_sa);
2379 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
2381 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
2385 eth_hw_addr_set(netdev, mac_addr->sa_data);
2390 static int hns3_nic_do_ioctl(struct net_device *netdev,
2391 struct ifreq *ifr, int cmd)
2393 struct hnae3_handle *h = hns3_get_handle(netdev);
2395 if (!netif_running(netdev))
2398 if (!h->ae_algo->ops->do_ioctl)
2401 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
2404 static int hns3_nic_set_features(struct net_device *netdev,
2405 netdev_features_t features)
2407 netdev_features_t changed = netdev->features ^ features;
2408 struct hns3_nic_priv *priv = netdev_priv(netdev);
2409 struct hnae3_handle *h = priv->ae_handle;
2413 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
2414 enable = !!(features & NETIF_F_GRO_HW);
2415 ret = h->ae_algo->ops->set_gro_en(h, enable);
2420 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
2421 h->ae_algo->ops->enable_hw_strip_rxvtag) {
2422 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
2423 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
2428 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
2429 enable = !!(features & NETIF_F_NTUPLE);
2430 h->ae_algo->ops->enable_fd(h, enable);
2433 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
2434 h->ae_algo->ops->cls_flower_active(h)) {
2436 "there are offloaded TC filters active, cannot disable HW TC offload");
2440 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2441 h->ae_algo->ops->enable_vlan_filter) {
2442 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2443 ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
2448 netdev->features = features;
2452 static netdev_features_t hns3_features_check(struct sk_buff *skb,
2453 struct net_device *dev,
2454 netdev_features_t features)
2456 #define HNS3_MAX_HDR_LEN 480U
2457 #define HNS3_MAX_L4_HDR_LEN 60U
2461 if (skb->ip_summed != CHECKSUM_PARTIAL)
2464 if (skb->encapsulation)
2465 len = skb_inner_transport_header(skb) - skb->data;
2467 len = skb_transport_header(skb) - skb->data;
2469 /* Assume the L4 header is 60 bytes, as TCP is the only protocol with
2470 * a flexible header length and its maximum length is 60 bytes.
2472 len += HNS3_MAX_L4_HDR_LEN;
2474 /* Hardware only supports checksum on the skb with a max header
2477 if (len > HNS3_MAX_HDR_LEN)
2478 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
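/* Accumulate one ring's software counters into @stats inside the
 * u64_stats seqcount retry loop, so 64-bit counters are read
 * consistently even on 32-bit systems.
 */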
2483 static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
2484 struct hns3_enet_ring *ring, bool is_tx)
2489 start = u64_stats_fetch_begin_irq(&ring->syncp);
2491 stats->tx_bytes += ring->stats.tx_bytes;
2492 stats->tx_packets += ring->stats.tx_pkts;
2493 stats->tx_dropped += ring->stats.sw_err_cnt;
2494 stats->tx_dropped += ring->stats.tx_vlan_err;
2495 stats->tx_dropped += ring->stats.tx_l4_proto_err;
2496 stats->tx_dropped += ring->stats.tx_l2l3l4_err;
2497 stats->tx_dropped += ring->stats.tx_tso_err;
2498 stats->tx_dropped += ring->stats.over_max_recursion;
2499 stats->tx_dropped += ring->stats.hw_limitation;
2500 stats->tx_dropped += ring->stats.copy_bits_err;
2501 stats->tx_dropped += ring->stats.skb2sgl_err;
2502 stats->tx_dropped += ring->stats.map_sg_err;
2503 stats->tx_errors += ring->stats.sw_err_cnt;
2504 stats->tx_errors += ring->stats.tx_vlan_err;
2505 stats->tx_errors += ring->stats.tx_l4_proto_err;
2506 stats->tx_errors += ring->stats.tx_l2l3l4_err;
2507 stats->tx_errors += ring->stats.tx_tso_err;
2508 stats->tx_errors += ring->stats.over_max_recursion;
2509 stats->tx_errors += ring->stats.hw_limitation;
2510 stats->tx_errors += ring->stats.copy_bits_err;
2511 stats->tx_errors += ring->stats.skb2sgl_err;
2512 stats->tx_errors += ring->stats.map_sg_err;
2514 stats->rx_bytes += ring->stats.rx_bytes;
2515 stats->rx_packets += ring->stats.rx_pkts;
2516 stats->rx_dropped += ring->stats.l2_err;
2517 stats->rx_errors += ring->stats.l2_err;
2518 stats->rx_errors += ring->stats.l3l4_csum_err;
2519 stats->rx_crc_errors += ring->stats.l2_err;
2520 stats->multicast += ring->stats.rx_multicast;
2521 stats->rx_length_errors += ring->stats.err_pkt_len;
2523 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
2526 static void hns3_nic_get_stats64(struct net_device *netdev,
2527 struct rtnl_link_stats64 *stats)
2529 struct hns3_nic_priv *priv = netdev_priv(netdev);
2530 int queue_num = priv->ae_handle->kinfo.num_tqps;
2531 struct hnae3_handle *handle = priv->ae_handle;
2532 struct rtnl_link_stats64 ring_total_stats;
2533 struct hns3_enet_ring *ring;
2536 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
2539 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
2541 memset(&ring_total_stats, 0, sizeof(ring_total_stats));
2542 for (idx = 0; idx < queue_num; idx++) {
2543 /* fetch the tx stats */
2544 ring = &priv->ring[idx];
2545 hns3_fetch_stats(&ring_total_stats, ring, true);
2547 /* fetch the rx stats */
2548 ring = &priv->ring[idx + queue_num];
2549 hns3_fetch_stats(&ring_total_stats, ring, false);
2552 stats->tx_bytes = ring_total_stats.tx_bytes;
2553 stats->tx_packets = ring_total_stats.tx_packets;
2554 stats->rx_bytes = ring_total_stats.rx_bytes;
2555 stats->rx_packets = ring_total_stats.rx_packets;
2557 stats->rx_errors = ring_total_stats.rx_errors;
2558 stats->multicast = ring_total_stats.multicast;
2559 stats->rx_length_errors = ring_total_stats.rx_length_errors;
2560 stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
2561 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
2563 stats->tx_errors = ring_total_stats.tx_errors;
2564 stats->rx_dropped = ring_total_stats.rx_dropped;
2565 stats->tx_dropped = ring_total_stats.tx_dropped;
2566 stats->collisions = netdev->stats.collisions;
2567 stats->rx_over_errors = netdev->stats.rx_over_errors;
2568 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
2569 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
2570 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
2571 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
2572 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
2573 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
2574 stats->tx_window_errors = netdev->stats.tx_window_errors;
2575 stats->rx_compressed = netdev->stats.rx_compressed;
2576 stats->tx_compressed = netdev->stats.tx_compressed;
2579 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
2581 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2582 struct hnae3_knic_private_info *kinfo;
2583 u8 tc = mqprio_qopt->qopt.num_tc;
2584 u16 mode = mqprio_qopt->mode;
2585 u8 hw = mqprio_qopt->qopt.hw;
2586 struct hnae3_handle *h;
2588 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
2589 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
2592 if (tc > HNAE3_MAX_TC)
2598 h = hns3_get_handle(netdev);
2601 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
2603 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
2604 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP;
2607 static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv,
2608 struct flow_cls_offload *flow)
2610 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid);
2611 struct hnae3_handle *h = hns3_get_handle(priv->netdev);
2613 switch (flow->command) {
2614 case FLOW_CLS_REPLACE:
2615 if (h->ae_algo->ops->add_cls_flower)
2616 return h->ae_algo->ops->add_cls_flower(h, flow, tc);
2618 case FLOW_CLS_DESTROY:
2619 if (h->ae_algo->ops->del_cls_flower)
2620 return h->ae_algo->ops->del_cls_flower(h, flow);
2629 static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2632 struct hns3_nic_priv *priv = cb_priv;
2634 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
2638 case TC_SETUP_CLSFLOWER:
2639 return hns3_setup_tc_cls_flower(priv, type_data);
2645 static LIST_HEAD(hns3_block_cb_list);
2647 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
2650 struct hns3_nic_priv *priv = netdev_priv(dev);
2654 case TC_SETUP_QDISC_MQPRIO:
2655 ret = hns3_setup_tc(dev, type_data);
2657 case TC_SETUP_BLOCK:
2658 ret = flow_block_cb_setup_simple(type_data,
2659 &hns3_block_cb_list,
2660 hns3_setup_tc_block_cb,
2670 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
2671 __be16 proto, u16 vid)
2673 struct hnae3_handle *h = hns3_get_handle(netdev);
2676 if (h->ae_algo->ops->set_vlan_filter)
2677 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
2682 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
2683 __be16 proto, u16 vid)
2685 struct hnae3_handle *h = hns3_get_handle(netdev);
2688 if (h->ae_algo->ops->set_vlan_filter)
2689 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
2694 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2695 u8 qos, __be16 vlan_proto)
2697 struct hnae3_handle *h = hns3_get_handle(netdev);
2700 netif_dbg(h, drv, netdev,
2701 "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
2702 vf, vlan, qos, ntohs(vlan_proto));
2704 if (h->ae_algo->ops->set_vf_vlan_filter)
2705 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
2711 static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2713 struct hnae3_handle *handle = hns3_get_handle(netdev);
2715 if (hns3_nic_resetting(netdev))
2718 if (!handle->ae_algo->ops->set_vf_spoofchk)
2721 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
2724 static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
2726 struct hnae3_handle *handle = hns3_get_handle(netdev);
2728 if (!handle->ae_algo->ops->set_vf_trust)
2731 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
2734 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
2736 struct hnae3_handle *h = hns3_get_handle(netdev);
2739 if (hns3_nic_resetting(netdev))
2742 if (!h->ae_algo->ops->set_mtu)
2745 netif_dbg(h, drv, netdev,
2746 "change mtu from %u to %d\n", netdev->mtu, new_mtu);
2748 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
2750 netdev_err(netdev, "failed to change MTU in hardware %d\n",
2753 netdev->mtu = new_mtu;
2758 static int hns3_get_timeout_queue(struct net_device *ndev)
2762 /* Find the stopped queue the same way the stack does */
2763 for (i = 0; i < ndev->num_tx_queues; i++) {
2764 struct netdev_queue *q;
2765 unsigned long trans_start;
2767 q = netdev_get_tx_queue(ndev, i);
2768 trans_start = READ_ONCE(q->trans_start);
2769 if (netif_xmit_stopped(q) &&
2771 (trans_start + ndev->watchdog_timeo))) {
2773 struct dql *dql = &q->dql;
2775 netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
2776 dql->last_obj_cnt, dql->num_queued,
2777 dql->adj_limit, dql->num_completed);
2779 netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
2781 jiffies_to_msecs(jiffies - trans_start));
2789 static void hns3_dump_queue_stats(struct net_device *ndev,
2790 struct hns3_enet_ring *tx_ring,
2793 struct napi_struct *napi = &tx_ring->tqp_vector->napi;
2794 struct hns3_nic_priv *priv = netdev_priv(ndev);
2797 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
2798 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
2799 tx_ring->next_to_clean, napi->state);
2802 "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
2803 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
2804 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
2807 "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
2808 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
2809 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
2811 netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
2812 tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
2815 static void hns3_dump_queue_reg(struct net_device *ndev,
2816 struct hns3_enet_ring *tx_ring)
2819 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
2820 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
2821 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
2822 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
2823 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
2824 readl(tx_ring->tqp_vector->mask_addr));
2826 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
2827 hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
2828 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
2829 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
2830 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
2831 hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
2832 hns3_tqp_read_reg(tx_ring,
2833 HNS3_RING_TX_RING_EBD_OFFSET_REG));
2836 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
2838 struct hns3_nic_priv *priv = netdev_priv(ndev);
2839 struct hnae3_handle *h = hns3_get_handle(ndev);
2840 struct hns3_enet_ring *tx_ring;
2843 timeout_queue = hns3_get_timeout_queue(ndev);
2844 if (timeout_queue >= ndev->num_tx_queues) {
2846 "no netdev TX timeout queue found, timeout count: %llu\n",
2847 priv->tx_timeout_count);
2851 priv->tx_timeout_count++;
2853 tx_ring = &priv->ring[timeout_queue];
2854 hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
2856 /* When the MAC receives many pause frames continuously, it is unable to
2857 * send packets, which may cause a tx timeout
2859 if (h->ae_algo->ops->get_mac_stats) {
2860 struct hns3_mac_stats mac_stats;
2862 h->ae_algo->ops->get_mac_stats(h, &mac_stats);
2863 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
2864 mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
2867 hns3_dump_queue_reg(ndev, tx_ring);
2872 static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
2874 struct hns3_nic_priv *priv = netdev_priv(ndev);
2875 struct hnae3_handle *h = priv->ae_handle;
2877 if (!hns3_get_tx_timeo_queue_info(ndev))
2880 /* request the reset, and let hclge determine
2881 * which reset level should be done
2883 if (h->ae_algo->ops->reset_event)
2884 h->ae_algo->ops->reset_event(h->pdev, h);
2887 #ifdef CONFIG_RFS_ACCEL
2888 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
2889 u16 rxq_index, u32 flow_id)
2891 struct hnae3_handle *h = hns3_get_handle(dev);
2892 struct flow_keys fkeys;
2894 if (!h->ae_algo->ops->add_arfs_entry)
2897 if (skb->encapsulation)
2898 return -EPROTONOSUPPORT;
2900 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
2901 return -EPROTONOSUPPORT;
2903 if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
2904 fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
2905 (fkeys.basic.ip_proto != IPPROTO_TCP &&
2906 fkeys.basic.ip_proto != IPPROTO_UDP))
2907 return -EPROTONOSUPPORT;
2909 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
2913 static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
2914 struct ifla_vf_info *ivf)
2916 struct hnae3_handle *h = hns3_get_handle(ndev);
2918 if (!h->ae_algo->ops->get_vf_config)
2921 return h->ae_algo->ops->get_vf_config(h, vf, ivf);
2924 static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
2927 struct hnae3_handle *h = hns3_get_handle(ndev);
2929 if (!h->ae_algo->ops->set_vf_link_state)
2932 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
2935 static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
2936 int min_tx_rate, int max_tx_rate)
2938 struct hnae3_handle *h = hns3_get_handle(ndev);
2940 if (!h->ae_algo->ops->set_vf_rate)
2943 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
2947 static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2949 struct hnae3_handle *h = hns3_get_handle(netdev);
2950 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
2952 if (!h->ae_algo->ops->set_vf_mac)
2955 if (is_multicast_ether_addr(mac)) {
2956 hnae3_format_mac_addr(format_mac_addr, mac);
2958 "Invalid MAC:%s specified. Could not set MAC\n",
2963 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
2966 static const struct net_device_ops hns3_nic_netdev_ops = {
2967 .ndo_open = hns3_nic_net_open,
2968 .ndo_stop = hns3_nic_net_stop,
2969 .ndo_start_xmit = hns3_nic_net_xmit,
2970 .ndo_tx_timeout = hns3_nic_net_timeout,
2971 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
2972 .ndo_eth_ioctl = hns3_nic_do_ioctl,
2973 .ndo_change_mtu = hns3_nic_change_mtu,
2974 .ndo_set_features = hns3_nic_set_features,
2975 .ndo_features_check = hns3_features_check,
2976 .ndo_get_stats64 = hns3_nic_get_stats64,
2977 .ndo_setup_tc = hns3_nic_setup_tc,
2978 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
2979 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
2980 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
2981 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
2982 .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
2983 .ndo_set_vf_trust = hns3_set_vf_trust,
2984 #ifdef CONFIG_RFS_ACCEL
2985 .ndo_rx_flow_steer = hns3_rx_flow_steer,
2987 .ndo_get_vf_config = hns3_nic_get_vf_config,
2988 .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
2989 .ndo_set_vf_rate = hns3_nic_set_vf_rate,
2990 .ndo_set_vf_mac = hns3_nic_set_vf_mac,
2993 bool hns3_is_phys_func(struct pci_dev *pdev)
2995 u32 dev_id = pdev->device;
2998 case HNAE3_DEV_ID_GE:
2999 case HNAE3_DEV_ID_25GE:
3000 case HNAE3_DEV_ID_25GE_RDMA:
3001 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
3002 case HNAE3_DEV_ID_50GE_RDMA:
3003 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
3004 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
3005 case HNAE3_DEV_ID_200G_RDMA:
3007 case HNAE3_DEV_ID_VF:
3008 case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
3011 dev_warn(&pdev->dev, "unrecognized pci device-id %u",
3018 static void hns3_disable_sriov(struct pci_dev *pdev)
3020 /* If our VFs are assigned we cannot shut down SR-IOV
3021 * without causing issues, so just leave the hardware
3022 * available but disabled
3024 if (pci_vfs_assigned(pdev)) {
3025 dev_warn(&pdev->dev,
3026 "disabling driver while VFs are assigned\n");
3030 pci_disable_sriov(pdev);
3033 /* hns3_probe - Device initialization routine
3034 * @pdev: PCI device information struct
3035 * @ent: entry in hns3_pci_tbl
3037 * hns3_probe initializes a PF identified by a pci_dev structure.
3038 * The OS initialization, configuring of the PF private structure,
3039 * and a hardware reset occur.
3041 * Returns 0 on success, negative on failure
3043 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3045 struct hnae3_ae_dev *ae_dev;
3048 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
3052 ae_dev->pdev = pdev;
3053 ae_dev->flag = ent->driver_data;
3054 pci_set_drvdata(pdev, ae_dev);
3056 ret = hnae3_register_ae_dev(ae_dev);
3058 pci_set_drvdata(pdev, NULL);
3064 * hns3_clean_vf_config
3065 * @pdev: pointer to a pci_dev structure
3066 * @num_vfs: number of VFs allocated
3068 * Clean residual VF config after SR-IOV is disabled
3070 static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
3072 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3074 if (ae_dev->ops->clean_vf_config)
3075 ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
3078 /* hns3_remove - Device removal routine
3079 * @pdev: PCI device information struct
3081 static void hns3_remove(struct pci_dev *pdev)
3083 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3085 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
3086 hns3_disable_sriov(pdev);
3088 hnae3_unregister_ae_dev(ae_dev);
3089 pci_set_drvdata(pdev, NULL);
3093 * hns3_pci_sriov_configure
3094 * @pdev: pointer to a pci_dev structure
3095 * @num_vfs: number of VFs to allocate
3097 * Enable or change the number of VFs. Called when the user updates the number
3100 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
3104 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
3105 dev_warn(&pdev->dev, "Can not config SRIOV\n");
3110 ret = pci_enable_sriov(pdev, num_vfs);
3112 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
3115 } else if (!pci_vfs_assigned(pdev)) {
3116 int num_vfs_pre = pci_num_vf(pdev);
3118 pci_disable_sriov(pdev);
3119 hns3_clean_vf_config(pdev, num_vfs_pre);
3121 dev_warn(&pdev->dev,
3122 "Unable to free VFs because some are assigned to VMs.\n");
3128 static void hns3_shutdown(struct pci_dev *pdev)
3130 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3132 hnae3_unregister_ae_dev(ae_dev);
3133 pci_set_drvdata(pdev, NULL);
3135 if (system_state == SYSTEM_POWER_OFF)
3136 pci_set_power_state(pdev, PCI_D3hot);
3139 static int __maybe_unused hns3_suspend(struct device *dev)
3141 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3143 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3144 dev_info(dev, "Begin to suspend.\n");
3145 if (ae_dev->ops && ae_dev->ops->reset_prepare)
3146 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET);
3152 static int __maybe_unused hns3_resume(struct device *dev)
3154 struct hnae3_ae_dev *ae_dev = dev_get_drvdata(dev);
3156 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) {
3157 dev_info(dev, "Begin to resume.\n");
3158 if (ae_dev->ops && ae_dev->ops->reset_done)
3159 ae_dev->ops->reset_done(ae_dev);
3165 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
3166 pci_channel_state_t state)
3168 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3169 pci_ers_result_t ret;
3171 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
3173 if (state == pci_channel_io_perm_failure)
3174 return PCI_ERS_RESULT_DISCONNECT;
3176 if (!ae_dev || !ae_dev->ops) {
3178 "Can't recover - error happened before device initialized\n");
3179 return PCI_ERS_RESULT_NONE;
3182 if (ae_dev->ops->handle_hw_ras_error)
3183 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
3185 return PCI_ERS_RESULT_NONE;
3190 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
3192 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3193 const struct hnae3_ae_ops *ops;
3194 enum hnae3_reset_type reset_type;
3195 struct device *dev = &pdev->dev;
3197 if (!ae_dev || !ae_dev->ops)
3198 return PCI_ERS_RESULT_NONE;
3201 /* request the reset */
3202 if (ops->reset_event && ops->get_reset_level &&
3203 ops->set_default_reset_request) {
3204 if (ae_dev->hw_err_reset_req) {
3205 reset_type = ops->get_reset_level(ae_dev,
3206 &ae_dev->hw_err_reset_req);
3207 ops->set_default_reset_request(ae_dev, reset_type);
3208 dev_info(dev, "requesting reset due to PCI error\n");
3209 ops->reset_event(pdev, NULL);
3212 return PCI_ERS_RESULT_RECOVERED;
3215 return PCI_ERS_RESULT_DISCONNECT;
3218 static void hns3_reset_prepare(struct pci_dev *pdev)
3220 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3222 dev_info(&pdev->dev, "FLR prepare\n");
3223 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare)
3224 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET);
3227 static void hns3_reset_done(struct pci_dev *pdev)
3229 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3231 dev_info(&pdev->dev, "FLR done\n");
3232 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done)
3233 ae_dev->ops->reset_done(ae_dev);
3236 static const struct pci_error_handlers hns3_err_handler = {
3237 .error_detected = hns3_error_detected,
3238 .slot_reset = hns3_slot_reset,
3239 .reset_prepare = hns3_reset_prepare,
3240 .reset_done = hns3_reset_done,
3243 static SIMPLE_DEV_PM_OPS(hns3_pm_ops, hns3_suspend, hns3_resume);
3245 static struct pci_driver hns3_driver = {
3246 .name = hns3_driver_name,
3247 .id_table = hns3_pci_tbl,
3248 .probe = hns3_probe,
3249 .remove = hns3_remove,
3250 .shutdown = hns3_shutdown,
3251 .driver.pm = &hns3_pm_ops,
3252 .sriov_configure = hns3_pci_sriov_configure,
3253 .err_handler = &hns3_err_handler,
3256 /* set the default features for hns3 */
3257 static void hns3_set_default_feature(struct net_device *netdev)
3259 struct hnae3_handle *h = hns3_get_handle(netdev);
3260 struct pci_dev *pdev = h->pdev;
3261 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3263 netdev->priv_flags |= IFF_UNICAST_FLT;
3265 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3267 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3268 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3269 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
3270 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
3271 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
3272 NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
3274 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3275 netdev->features |= NETIF_F_GRO_HW;
3277 if (!(h->flags & HNAE3_SUPPORT_VF))
3278 netdev->features |= NETIF_F_NTUPLE;
3281 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
3282 netdev->features |= NETIF_F_GSO_UDP_L4;
3284 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
3285 netdev->features |= NETIF_F_HW_CSUM;
3287 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3289 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps))
3290 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
3292 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps))
3293 netdev->features |= NETIF_F_HW_TC;
3295 netdev->hw_features |= netdev->features;
3296 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
3297 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3299 netdev->vlan_features |= netdev->features &
3300 ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
3301 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE |
3304 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
3307 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
3308 struct hns3_desc_cb *cb)
3310 unsigned int order = hns3_page_order(ring);
3313 if (ring->page_pool) {
3314 p = page_pool_dev_alloc_frag(ring->page_pool,
3316 hns3_buf_size(ring));
3321 cb->buf = page_address(p);
3322 cb->dma = page_pool_get_dma_addr(p);
3323 cb->type = DESC_TYPE_PP_FRAG;
3328 p = dev_alloc_pages(order);
3333 cb->page_offset = 0;
3335 cb->buf = page_address(p);
3336 cb->length = hns3_page_size(ring);
3337 cb->type = DESC_TYPE_PAGE;
3338 page_ref_add(p, USHRT_MAX - 1);
3339 cb->pagecnt_bias = USHRT_MAX;
3344 static void hns3_free_buffer(struct hns3_enet_ring *ring,
3345 struct hns3_desc_cb *cb, int budget)
3347 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
3348 DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
3349 napi_consume_skb(cb->priv, budget);
3350 else if (!HNAE3_IS_TX_RING(ring)) {
3351 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
3352 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
3353 else if (cb->type & DESC_TYPE_PP_FRAG)
3354 page_pool_put_full_page(ring->page_pool, cb->priv,
3357 memset(cb, 0, sizeof(*cb));
3360 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
3362 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
3363 cb->length, ring_to_dma_dir(ring));
3365 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
3371 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
3372 struct hns3_desc_cb *cb)
3374 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
3375 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
3376 ring_to_dma_dir(ring));
3377 else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
3378 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
3379 ring_to_dma_dir(ring));
3380 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
3382 hns3_tx_spare_reclaim_cb(ring, cb);
3385 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
3387 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3388 ring->desc[i].addr = 0;
3389 ring->desc_cb[i].refill = 0;
3392 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
3395 struct hns3_desc_cb *cb = &ring->desc_cb[i];
3397 if (!ring->desc_cb[i].dma)
3400 hns3_buffer_detach(ring, i);
3401 hns3_free_buffer(ring, cb, budget);
3404 static void hns3_free_buffers(struct hns3_enet_ring *ring)
3408 for (i = 0; i < ring->desc_num; i++)
3409 hns3_free_buffer_detach(ring, i, 0);
3412 /* free desc along with its attached buffer */
3413 static void hns3_free_desc(struct hns3_enet_ring *ring)
3415 int size = ring->desc_num * sizeof(ring->desc[0]);
3417 hns3_free_buffers(ring);
3420 dma_free_coherent(ring_to_dev(ring), size,
3421 ring->desc, ring->desc_dma_addr);
3426 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
3428 int size = ring->desc_num * sizeof(ring->desc[0]);
3430 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
3431 &ring->desc_dma_addr, GFP_KERNEL);
3438 static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
3439 struct hns3_desc_cb *cb)
3443 ret = hns3_alloc_buffer(ring, cb);
3444 if (ret || ring->page_pool)
3447 ret = hns3_map_buffer(ring, cb);
3454 hns3_free_buffer(ring, cb, 0);
3459 static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
3461 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
3466 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3467 ring->desc_cb[i].page_offset);
3468 ring->desc_cb[i].refill = 1;
3473 /* Allocate memory for the raw packet buffers and map them for DMA */
3474 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
3478 for (i = 0; i < ring->desc_num; i++) {
3479 ret = hns3_alloc_and_attach_buffer(ring, i);
3481 goto out_buffer_fail;
3487 for (j = i - 1; j >= 0; j--)
3488 hns3_free_buffer_detach(ring, j, 0);
3492 /* detach an in-use buffer and replace it with a reserved one */
3493 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
3494 struct hns3_desc_cb *res_cb)
3496 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
3497 ring->desc_cb[i] = *res_cb;
3498 ring->desc_cb[i].refill = 1;
3499 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3500 ring->desc_cb[i].page_offset);
3501 ring->desc[i].rx.bd_base_info = 0;
3504 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
3506 ring->desc_cb[i].reuse_flag = 0;
3507 ring->desc_cb[i].refill = 1;
3508 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
3509 ring->desc_cb[i].page_offset);
3510 ring->desc[i].rx.bd_base_info = 0;
3512 dma_sync_single_for_device(ring_to_dev(ring),
3513 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
3514 hns3_buf_size(ring),
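/* Reclaim tx descriptors that the hardware has finished with, from
 * next_to_clean up to last_to_use, freeing their buffers and returning
 * the completed byte and packet counts to the caller.
 */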
3518 static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
3519 int *bytes, int *pkts, int budget)
3521 /* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
3522 * smp_store_release() is not used in hns3_tx_doorbell() because
3523 * the doorbell operation already has the needed barrier operation.
3525 int ltu = smp_load_acquire(&ring->last_to_use);
3526 int ntc = ring->next_to_clean;
3527 struct hns3_desc_cb *desc_cb;
3528 bool reclaimed = false;
3529 struct hns3_desc *desc;
3531 while (ltu != ntc) {
3532 desc = &ring->desc[ntc];
3534 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
3535 BIT(HNS3_TXD_VLD_B))
3538 desc_cb = &ring->desc_cb[ntc];
3540 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
3541 DESC_TYPE_BOUNCE_HEAD |
3542 DESC_TYPE_SGL_SKB)) {
3544 (*bytes) += desc_cb->send_bytes;
3547 /* desc_cb will be cleaned after hns3_free_buffer_detach() */
3548 hns3_free_buffer_detach(ring, ntc, budget);
3550 if (++ntc == ring->desc_num)
3553 /* Issue prefetch for next Tx descriptor */
3554 prefetch(&ring->desc_cb[ntc]);
3558 if (unlikely(!reclaimed))
3561 /* This smp_store_release() pairs with smp_load_acquire() in
3562 * ring_space called by hns3_nic_net_xmit.
3564 smp_store_release(&ring->next_to_clean, ntc);
3566 hns3_tx_spare_update(ring);
3571 void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
3573 struct net_device *netdev = ring_to_netdev(ring);
3574 struct hns3_nic_priv *priv = netdev_priv(netdev);
3575 struct netdev_queue *dev_queue;
3581 if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
3584 ring->tqp_vector->tx_group.total_bytes += bytes;
3585 ring->tqp_vector->tx_group.total_packets += pkts;
3587 u64_stats_update_begin(&ring->syncp);
3588 ring->stats.tx_bytes += bytes;
3589 ring->stats.tx_pkts += pkts;
3590 u64_stats_update_end(&ring->syncp);
3592 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
3593 netdev_tx_completed_queue(dev_queue, pkts, bytes);
3595 if (unlikely(netif_carrier_ok(netdev) &&
3596 ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
3597 /* Make sure that anybody stopping the queue after this
3598 * sees the new next_to_clean.
3601 if (netif_tx_queue_stopped(dev_queue) &&
3602 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
3603 netif_tx_wake_queue(dev_queue);
3604 ring->stats.restart_queue++;
3609 static int hns3_desc_unused(struct hns3_enet_ring *ring)
3611 int ntc = ring->next_to_clean;
3612 int ntu = ring->next_to_use;
3614 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
3615 return ring->desc_num;
3617 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
3620 /* Return true if there is any allocation failure */
3621 static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
3624 struct hns3_desc_cb *desc_cb;
3625 struct hns3_desc_cb res_cbs;
3628 for (i = 0; i < cleand_count; i++) {
3629 desc_cb = &ring->desc_cb[ring->next_to_use];
3630 if (desc_cb->reuse_flag) {
3631 hns3_ring_stats_update(ring, reuse_pg_cnt);
3633 hns3_reuse_buffer(ring, ring->next_to_use);
3635 ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
3637 hns3_ring_stats_update(ring, sw_err_cnt);
3639 hns3_rl_err(ring_to_netdev(ring),
3640 "alloc rx buffer failed: %d\n",
3643 writel(i, ring->tqp->io_base +
3644 HNS3_RING_RX_RING_HEAD_REG);
3647 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
3649 hns3_ring_stats_update(ring, non_reuse_pg);
3652 ring_ptr_move_fw(ring, next_to_use);
3655 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
3659 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
3661 return page_count(cb->priv) == cb->pagecnt_bias;
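/* For small rx fragments, copy the payload into a freshly allocated page
 * fragment so the original rx page can be flagged for reuse.
 */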
3664 static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
3665 struct hns3_enet_ring *ring,
3667 struct hns3_desc_cb *desc_cb)
3669 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
3670 u32 frag_offset = desc_cb->page_offset + pull_len;
3671 int size = le16_to_cpu(desc->rx.size);
3672 u32 frag_size = size - pull_len;
3673 void *frag = napi_alloc_frag(frag_size);
3675 if (unlikely(!frag)) {
3676 hns3_ring_stats_update(ring, frag_alloc_err);
3678 hns3_rl_err(ring_to_netdev(ring),
3679 "failed to allocate rx frag\n");
3683 desc_cb->reuse_flag = 1;
3684 memcpy(frag, desc_cb->buf + frag_offset, frag_size);
3685 skb_add_rx_frag(skb, i, virt_to_page(frag),
3686 offset_in_page(frag), frag_size, frag_size);
3688 hns3_ring_stats_update(ring, frag_alloc);
3692 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
3693 struct hns3_enet_ring *ring, int pull_len,
3694 struct hns3_desc_cb *desc_cb)
3696 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
3697 u32 frag_offset = desc_cb->page_offset + pull_len;
3698 int size = le16_to_cpu(desc->rx.size);
3699 u32 truesize = hns3_buf_size(ring);
3700 u32 frag_size = size - pull_len;
3704 if (ring->page_pool) {
3705 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
3706 frag_size, truesize);
3710 /* Avoid re-using remote or pfmem page */
3711 if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
3714 reused = hns3_can_reuse_page(desc_cb);
3716 /* Rx page can be reused when:
3717 * 1. Rx page is only owned by the driver when page_offset
3718 * is zero, which means 0 @ truesize will be used by
3719 * stack after skb_add_rx_frag() is called, and the rest
3720 * of rx page can be reused by driver.
3722 * 2. Rx page is only owned by the driver when page_offset
3723 * is non-zero, which means page_offset @ truesize will
3724 * be used by stack after skb_add_rx_frag() is called,
3725 * and 0 @ truesize can be reused by driver.
3727 if ((!desc_cb->page_offset && reused) ||
3728 ((desc_cb->page_offset + truesize + truesize) <=
3729 hns3_page_size(ring) && desc_cb->page_offset)) {
3730 desc_cb->page_offset += truesize;
3731 desc_cb->reuse_flag = 1;
3732 } else if (desc_cb->page_offset && reused) {
3733 desc_cb->page_offset = 0;
3734 desc_cb->reuse_flag = 1;
3735 } else if (frag_size <= ring->rx_copybreak) {
3736 ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
3742 desc_cb->pagecnt_bias--;
3744 if (unlikely(!desc_cb->pagecnt_bias)) {
3745 page_ref_add(desc_cb->priv, USHRT_MAX);
3746 desc_cb->pagecnt_bias = USHRT_MAX;
3749 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
3750 frag_size, truesize);
3752 if (unlikely(!desc_cb->reuse_flag))
3753 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
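/* Finish a hardware-GROed skb: skip any VLAN headers, locate the TCP
 * header, reset the pseudo-header checksum and fill in the GSO fields
 * the stack expects before forwarding.
 */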
3756 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
3758 __be16 type = skb->protocol;
3762 while (eth_type_vlan(type)) {
3763 struct vlan_hdr *vh;
3765 if ((depth + VLAN_HLEN) > skb_headlen(skb))
3768 vh = (struct vlan_hdr *)(skb->data + depth);
3769 type = vh->h_vlan_encapsulated_proto;
3773 skb_set_network_header(skb, depth);
3775 if (type == htons(ETH_P_IP)) {
3776 const struct iphdr *iph = ip_hdr(skb);
3778 depth += sizeof(struct iphdr);
3779 skb_set_transport_header(skb, depth);
3781 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
3783 } else if (type == htons(ETH_P_IPV6)) {
3784 const struct ipv6hdr *iph = ipv6_hdr(skb);
3786 depth += sizeof(struct ipv6hdr);
3787 skb_set_transport_header(skb, depth);
3789 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
3792 hns3_rl_err(skb->dev,
3793 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
3794 be16_to_cpu(type), depth);
3798 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
3800 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
3802 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
3803 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
3805 skb->csum_start = (unsigned char *)th - skb->head;
3806 skb->csum_offset = offsetof(struct tcphdr, check);
3807 skb->ip_summed = CHECKSUM_PARTIAL;
3809 trace_hns3_gro(skb);
3814 static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
3815 struct sk_buff *skb, u32 ptype, u16 csum)
3817 if (ptype == HNS3_INVALID_PTYPE ||
3818 hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
3821 hns3_ring_stats_update(ring, csum_complete);
3822 skb->ip_summed = CHECKSUM_COMPLETE;
3823 skb->csum = csum_unfold((__force __sum16)csum);
3828 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
3829 u32 ol_info, u32 ptype)
3831 int l3_type, l4_type;
3834 if (ptype != HNS3_INVALID_PTYPE) {
3835 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level;
3836 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed;
3841 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
3844 case HNS3_OL4_TYPE_MAC_IN_UDP:
3845 case HNS3_OL4_TYPE_NVGRE:
3846 skb->csum_level = 1;
3848 case HNS3_OL4_TYPE_NO_TUN:
3849 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
3851 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
3853 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
3854 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
3855 l3_type == HNS3_L3_TYPE_IPV6) &&
3856 (l4_type == HNS3_L4_TYPE_UDP ||
3857 l4_type == HNS3_L4_TYPE_TCP ||
3858 l4_type == HNS3_L4_TYPE_SCTP))
3859 skb->ip_summed = CHECKSUM_UNNECESSARY;
3866 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
3867 u32 l234info, u32 bd_base_info, u32 ol_info,
3870 struct net_device *netdev = ring_to_netdev(ring);
3871 struct hns3_nic_priv *priv = netdev_priv(netdev);
3872 u32 ptype = HNS3_INVALID_PTYPE;
3874 skb->ip_summed = CHECKSUM_NONE;
3876 skb_checksum_none_assert(skb);
3878 if (!(netdev->features & NETIF_F_RXCSUM))
3881 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state))
3882 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
3885 if (hns3_checksum_complete(ring, skb, ptype, csum))
3888 /* check if hardware has done checksum */
3889 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
3892 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
3893 BIT(HNS3_RXD_OL3E_B) |
3894 BIT(HNS3_RXD_OL4E_B)))) {
3895 hns3_ring_stats_update(ring, l3l4_csum_err);
3900 hns3_rx_handle_csum(skb, l234info, ol_info, ptype);
3903 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
3905 if (skb_has_frag_list(skb))
3906 napi_gro_flush(&ring->tqp_vector->napi, false);
3908 napi_gro_receive(&ring->tqp_vector->napi, skb);
3911 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
3912 struct hns3_desc *desc, u32 l234info,
3915 struct hnae3_handle *handle = ring->tqp->handle;
3916 struct pci_dev *pdev = ring->tqp->handle->pdev;
3917 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3919 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
3920 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3921 if (!(*vlan_tag & VLAN_VID_MASK))
3922 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3924 return (*vlan_tag != 0);
3927 #define HNS3_STRP_OUTER_VLAN 0x1
3928 #define HNS3_STRP_INNER_VLAN 0x2
3929 #define HNS3_STRP_BOTH 0x3
3931 /* Hardware always inserts the VLAN tag into the RX descriptor when
3932 * it strips the tag from the packet; the driver needs to determine
3933 * which tag to report to the stack.
3935 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
3936 HNS3_RXD_STRP_TAGP_S)) {
3937 case HNS3_STRP_OUTER_VLAN:
3938 if (handle->port_base_vlan_state !=
3939 HNAE3_PORT_BASE_VLAN_DISABLE)
3942 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3944 case HNS3_STRP_INNER_VLAN:
3945 if (handle->port_base_vlan_state !=
3946 HNAE3_PORT_BASE_VLAN_DISABLE)
3949 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3951 case HNS3_STRP_BOTH:
3952 if (handle->port_base_vlan_state ==
3953 HNAE3_PORT_BASE_VLAN_DISABLE)
3954 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
3956 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
3964 static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
3966 ring->desc[ring->next_to_clean].rx.bd_base_info &=
3967 cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
3968 ring->desc_cb[ring->next_to_clean].refill = 0;
3969 ring->next_to_clean += 1;
3971 if (unlikely(ring->next_to_clean == ring->desc_num))
3972 ring->next_to_clean = 0;
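/* Allocate an skb for the first BD of a packet: short packets are copied
 * entirely into the skb head, larger ones only have their headers pulled
 * in while the rest of the buffer is attached as a page frag.
 */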
3975 static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
3978 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
3979 struct net_device *netdev = ring_to_netdev(ring);
3980 struct sk_buff *skb;
3982 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
3984 if (unlikely(!skb)) {
3985 hns3_rl_err(netdev, "alloc rx skb fail\n");
3986 hns3_ring_stats_update(ring, sw_err_cnt);
3991 trace_hns3_rx_desc(ring);
3992 prefetchw(skb->data);
3994 ring->pending_buf = 1;
3996 ring->tail_skb = NULL;
3997 if (length <= HNS3_RX_HEAD_SIZE) {
3998 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
4000 /* We can reuse buffer as-is, just make sure it is reusable */
4001 if (dev_page_is_reusable(desc_cb->priv))
4002 desc_cb->reuse_flag = 1;
4003 else if (desc_cb->type & DESC_TYPE_PP_FRAG)
4004 page_pool_put_full_page(ring->page_pool, desc_cb->priv,
4006 else /* This page cannot be reused so discard it */
4007 __page_frag_cache_drain(desc_cb->priv,
4008 desc_cb->pagecnt_bias);
4010 hns3_rx_ring_move_fw(ring);
4014 if (ring->page_pool)
4015 skb_mark_for_recycle(skb);
4017 hns3_ring_stats_update(ring, seg_pkt_cnt);
4019 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
4020 __skb_put(skb, ring->pull_len);
4021 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
4023 hns3_rx_ring_move_fw(ring);
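/* Attach the remaining BDs of a multi-BD packet as frags, chaining extra
 * skbs on the frag_list once MAX_SKB_FRAGS frags are in use, until the
 * FE (frame end) bit is seen.
 */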
4028 static int hns3_add_frag(struct hns3_enet_ring *ring)
4030 struct sk_buff *skb = ring->skb;
4031 struct sk_buff *head_skb = skb;
4032 struct sk_buff *new_skb;
4033 struct hns3_desc_cb *desc_cb;
4034 struct hns3_desc *desc;
4038 desc = &ring->desc[ring->next_to_clean];
4039 desc_cb = &ring->desc_cb[ring->next_to_clean];
4040 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4041 /* make sure the HW has finished writing the descriptor */
4043 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
4046 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
4047 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
4048 if (unlikely(!new_skb)) {
4049 hns3_rl_err(ring_to_netdev(ring),
4050 "alloc rx fraglist skb fail\n");
4054 if (ring->page_pool)
4055 skb_mark_for_recycle(new_skb);
4059 if (ring->tail_skb) {
4060 ring->tail_skb->next = new_skb;
4061 ring->tail_skb = new_skb;
4063 skb_shinfo(skb)->frag_list = new_skb;
4064 ring->tail_skb = new_skb;
4068 if (ring->tail_skb) {
4069 head_skb->truesize += hns3_buf_size(ring);
4070 head_skb->data_len += le16_to_cpu(desc->rx.size);
4071 head_skb->len += le16_to_cpu(desc->rx.size);
4072 skb = ring->tail_skb;
4075 dma_sync_single_for_cpu(ring_to_dev(ring),
4076 desc_cb->dma + desc_cb->page_offset,
4077 hns3_buf_size(ring),
4080 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
4081 trace_hns3_rx_desc(ring);
4082 hns3_rx_ring_move_fw(ring);
4083 ring->pending_buf++;
4084 } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
4089 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
4090 struct sk_buff *skb, u32 l234info,
4091 u32 bd_base_info, u32 ol_info, u16 csum)
4093 struct net_device *netdev = ring_to_netdev(ring);
4094 struct hns3_nic_priv *priv = netdev_priv(netdev);
4097 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
4098 HNS3_RXD_GRO_SIZE_M,
4099 HNS3_RXD_GRO_SIZE_S);
4100 /* if there is no HW GRO, do not set gro params */
4101 if (!skb_shinfo(skb)->gso_size) {
4102 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info,
4107 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
4108 HNS3_RXD_GRO_COUNT_M,
4109 HNS3_RXD_GRO_COUNT_S);
4111 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
4112 u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
4115 l3_type = hns3_rx_ptype_tbl[ptype].l3_type;
4117 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
4121 if (l3_type == HNS3_L3_TYPE_IPV4)
4122 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
4123 else if (l3_type == HNS3_L3_TYPE_IPV6)
4124 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
4128 return hns3_gro_complete(skb, l234info);
4131 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
4132 struct sk_buff *skb, u32 rss_hash)
4134 struct hnae3_handle *handle = ring->tqp->handle;
4135 enum pkt_hash_types rss_type;
4138 rss_type = handle->kinfo.rss_type;
4140 rss_type = PKT_HASH_TYPE_NONE;
4142 skb_set_hash(skb, rss_hash, rss_type);
4145 static void hns3_handle_rx_ts_info(struct net_device *netdev,
4146 struct hns3_desc *desc, struct sk_buff *skb,
4149 if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
4150 struct hnae3_handle *h = hns3_get_handle(netdev);
4151 u32 nsec = le32_to_cpu(desc->ts_nsec);
4152 u32 sec = le32_to_cpu(desc->ts_sec);
4154 if (h->ae_algo->ops->get_rx_hwts)
4155 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
4159 static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
4160 struct hns3_desc *desc, struct sk_buff *skb,
4163 struct net_device *netdev = ring_to_netdev(ring);
4165 /* Based on the hw strategy, the offloaded tag is stored in
4166 * ot_vlan_tag in the two-layer tag case, and in vlan_tag
4167 * in the one-layer tag case.
4169 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
4172 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
4173 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
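/* Parse the last BD of a completed packet: hardware timestamp, VLAN tag,
 * error bits, GRO/checksum information and RSS hash, then update the rx
 * counters.
 */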
4178 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
4180 struct net_device *netdev = ring_to_netdev(ring);
4181 enum hns3_pkt_l2t_type l2_frame_type;
4182 u32 bd_base_info, l234info, ol_info;
4183 struct hns3_desc *desc;
4188 /* The bdinfo handled below is only valid on the last BD of the
4189 * current packet, and ring->next_to_clean indicates the first
4190 * descriptor of the next packet, so subtract 1 below.
4192 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
4193 (ring->desc_num - 1);
4194 desc = &ring->desc[pre_ntc];
4195 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4196 l234info = le32_to_cpu(desc->rx.l234_info);
4197 ol_info = le32_to_cpu(desc->rx.ol_info);
4198 csum = le16_to_cpu(desc->csum);
4200 hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
4202 hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
4204 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
4205 BIT(HNS3_RXD_L2E_B))))) {
4206 u64_stats_update_begin(&ring->syncp);
4207 if (l234info & BIT(HNS3_RXD_L2E_B))
4208 ring->stats.l2_err++;
4210 ring->stats.err_pkt_len++;
4211 u64_stats_update_end(&ring->syncp);
4218 /* Prepare the skb for handing off to the IP stack */
4219 skb->protocol = eth_type_trans(skb, netdev);
4221 /* This is needed in order to enable forwarding support */
4222 ret = hns3_set_gro_and_checksum(ring, skb, l234info,
4223 bd_base_info, ol_info, csum);
4224 if (unlikely(ret)) {
4225 hns3_ring_stats_update(ring, rx_err_cnt);
4229 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
4232 u64_stats_update_begin(&ring->syncp);
4233 ring->stats.rx_pkts++;
4234 ring->stats.rx_bytes += len;
4236 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
4237 ring->stats.rx_multicast++;
4239 u64_stats_update_end(&ring->syncp);
4241 ring->tqp_vector->rx_group.total_bytes += len;
4243 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
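/* Process one rx BD: check that it is valid, build or extend ring->skb,
 * and once the last BD of the packet arrives finish it via
 * hns3_handle_bdinfo().
 */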
4247 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
4249 struct sk_buff *skb = ring->skb;
4250 struct hns3_desc_cb *desc_cb;
4251 struct hns3_desc *desc;
4252 unsigned int length;
4256 desc = &ring->desc[ring->next_to_clean];
4257 desc_cb = &ring->desc_cb[ring->next_to_clean];
4262 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
4263 /* Check valid BD */
4264 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
4268 length = le16_to_cpu(desc->rx.size);
4270 ring->va = desc_cb->buf + desc_cb->page_offset;
4272 dma_sync_single_for_cpu(ring_to_dev(ring),
4273 desc_cb->dma + desc_cb->page_offset,
4274 hns3_buf_size(ring),
4277 /* Prefetch the first cache line of the first page.
4278 * The idea is to cache a few bytes of the packet header.
4279 * Our L1 cache line size is 64B, so we need to prefetch twice to
4280 * cover 128B. But systems may have larger 128B L1 cache lines,
4281 * in which case a single fetch would
4282 * suffice to cache the relevant part of the header.
4284 net_prefetch(ring->va);
4286 ret = hns3_alloc_skb(ring, length, ring->va);
4289 if (ret < 0) /* alloc buffer fail */
4291 if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
4292 ret = hns3_add_frag(ring);
4297 ret = hns3_add_frag(ring);
4302 /* As the head data may be changed when GRO is enabled, copy
4303 * the head data in after the rest of the rx data has completed
4305 if (skb->len > HNS3_RX_HEAD_SIZE)
4306 memcpy(skb->data, ring->va,
4307 ALIGN(ring->pull_len, sizeof(long)));
4309 ret = hns3_handle_bdinfo(ring, skb);
4310 if (unlikely(ret)) {
4311 dev_kfree_skb_any(skb);
4315 skb_record_rx_queue(skb, ring->tqp->tqp_index);
4319 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
4320 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
4322 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
4323 int unused_count = hns3_desc_unused(ring);
4324 bool failure = false;
4328 unused_count -= ring->pending_buf;
4330 while (recv_pkts < budget) {
4331 /* Reuse or realloc buffers */
4332 if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
4333 failure = failure ||
4334 hns3_nic_alloc_rx_buffers(ring, unused_count);
4335 unused_count = 0;
4336 }
4339 err = hns3_handle_rx_bd(ring);
4340 /* Do not get FE for the packet or failed to alloc skb */
4341 if (unlikely(!ring->skb || err == -ENXIO)) {
4342 goto out;
4343 } else if (likely(!err)) {
4344 rx_fn(ring, ring->skb);
4345 recv_pkts++;
4346 }
4348 unused_count += ring->pending_buf;
4349 ring->skb = NULL;
4350 ring->pending_buf = 0;
4351 }
4353 out:
4354 /* sync the head pointer before exiting, since the hardware
4355 * calculates the FBD number from the head pointer.
4356 */
4357 if (unused_count > 0)
4358 failure = failure ||
4359 hns3_nic_alloc_rx_buffers(ring, unused_count);
4361 return failure ? budget : recv_pkts;
4364 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4366 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
4367 struct dim_sample sample = {};
4369 if (!rx_group->coal.adapt_enable)
4370 return;
4372 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
4373 rx_group->total_bytes, &sample);
4374 net_dim(&rx_group->dim, sample);
4377 static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
4379 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
4380 struct dim_sample sample = {};
4382 if (!tx_group->coal.adapt_enable)
4383 return;
4385 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
4386 tx_group->total_bytes, &sample);
4387 net_dim(&tx_group->dim, sample);
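/* NAPI poll callback shared by all queue vectors. Tx rings are always
 * cleaned with the full budget, while the rx budget is split across the
 * vector's rx rings: for example, with a budget of 64 and 4 TQPs on the
 * vector, each rx ring is polled with max(64 / 4, 1) = 16 packets.
 */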
4390 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
4392 struct hns3_nic_priv *priv = netdev_priv(napi->dev);
4393 struct hns3_enet_ring *ring;
4394 int rx_pkt_total = 0;
4396 struct hns3_enet_tqp_vector *tqp_vector =
4397 container_of(napi, struct hns3_enet_tqp_vector, napi);
4398 bool clean_complete = true;
4399 int rx_budget = budget;
4401 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4402 napi_complete(napi);
4403 return 0;
4404 }
4406 /* Since the actual Tx work is minimal, we can give the Tx a larger
4407 * budget and be more aggressive about cleaning up the Tx descriptors.
4409 hns3_for_each_ring(ring, tqp_vector->tx_group)
4410 hns3_clean_tx_ring(ring, budget);
4412 /* make sure rx ring budget not smaller than 1 */
4413 if (tqp_vector->num_tqps > 1)
4414 rx_budget = max(budget / tqp_vector->num_tqps, 1);
4416 hns3_for_each_ring(ring, tqp_vector->rx_group) {
4417 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
4418 hns3_rx_skb);
4419 if (rx_cleaned >= rx_budget)
4420 clean_complete = false;
4422 rx_pkt_total += rx_cleaned;
4425 tqp_vector->rx_group.total_packets += rx_pkt_total;
4427 if (!clean_complete)
4428 return budget;
4430 if (napi_complete(napi) &&
4431 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
4432 hns3_update_rx_int_coalesce(tqp_vector);
4433 hns3_update_tx_int_coalesce(tqp_vector);
4435 hns3_mask_vector_irq(tqp_vector, 1);
4436 }
4438 return rx_pkt_total;
4439 }
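/* The ring chain built below is a singly linked list of
 * hnae3_ring_chain_node entries, one per ring of the vector (tx rings
 * first, then rx rings). It is handed to the ae_algo
 * map_ring_to_vector()/unmap_ring_from_vector() ops so the hardware can
 * associate every ring with the vector's interrupt.
 */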
4441 static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4442 struct hnae3_ring_chain_node **head,
4443 bool is_tx)
4445 u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
4446 u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
4447 struct hnae3_ring_chain_node *cur_chain = *head;
4448 struct pci_dev *pdev = tqp_vector->handle->pdev;
4449 struct hnae3_ring_chain_node *chain;
4450 struct hns3_enet_ring *ring;
4452 ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
4454 if (cur_chain) {
4455 while (cur_chain->next)
4456 cur_chain = cur_chain->next;
4457 }
4459 while (ring) {
4460 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
4461 if (!chain)
4462 return -ENOMEM;
4463 if (cur_chain)
4464 cur_chain->next = chain;
4465 else
4466 *head = chain;
4467 chain->tqp_index = ring->tqp->tqp_index;
4468 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
4469 bit_value);
4470 hnae3_set_field(chain->int_gl_idx,
4471 HNAE3_RING_GL_IDX_M,
4472 HNAE3_RING_GL_IDX_S, field_value);
4473 cur_chain = chain;
4475 ring = ring->next;
4476 }
4478 return 0;
4479 }
4482 static struct hnae3_ring_chain_node *
4483 hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
4485 struct pci_dev *pdev = tqp_vector->handle->pdev;
4486 struct hnae3_ring_chain_node *cur_chain = NULL;
4487 struct hnae3_ring_chain_node *chain;
4489 if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
4490 goto err_free_chain;
4492 if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
4493 goto err_free_chain;
4495 return cur_chain;
4497 err_free_chain:
4498 while (cur_chain) {
4499 chain = cur_chain->next;
4500 devm_kfree(&pdev->dev, cur_chain);
4501 cur_chain = chain;
4502 }
4504 return NULL;
4507 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
4508 struct hnae3_ring_chain_node *head)
4510 struct pci_dev *pdev = tqp_vector->handle->pdev;
4511 struct hnae3_ring_chain_node *chain_tmp, *chain;
4513 chain = head;
4515 while (chain) {
4516 chain_tmp = chain->next;
4517 devm_kfree(&pdev->dev, chain);
4518 chain = chain_tmp;
4519 }
4522 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
4523 struct hns3_enet_ring *ring)
4525 ring->next = group->ring;
4526 group->ring = ring;
4528 group->count++;
4531 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
4533 struct pci_dev *pdev = priv->ae_handle->pdev;
4534 struct hns3_enet_tqp_vector *tqp_vector;
4535 int num_vectors = priv->vector_num;
4539 numa_node = dev_to_node(&pdev->dev);
4541 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
4542 tqp_vector = &priv->tqp_vector[vector_i];
4543 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
4544 &tqp_vector->affinity_mask);
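/* The two work handlers below apply the moderation profile suggested by
 * net_dim to the hardware: the usec value programs the GL (gap limiter)
 * setting and, when QL is supported (int_ql_max is non-zero), the packet
 * count programs the QL (quantity limiter) setting for the vector's rx or
 * tx group.
 */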
4548 static void hns3_rx_dim_work(struct work_struct *work)
4550 struct dim *dim = container_of(work, struct dim, work);
4551 struct hns3_enet_ring_group *group = container_of(dim,
4552 struct hns3_enet_ring_group, dim);
4553 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4554 struct dim_cq_moder cur_moder =
4555 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
4557 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
4558 tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
4560 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
4561 hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
4562 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
4565 dim->state = DIM_START_MEASURE;
4568 static void hns3_tx_dim_work(struct work_struct *work)
4570 struct dim *dim = container_of(work, struct dim, work);
4571 struct hns3_enet_ring_group *group = container_of(dim,
4572 struct hns3_enet_ring_group, dim);
4573 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
4574 struct dim_cq_moder cur_moder =
4575 net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
4577 hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
4578 tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
4580 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
4581 hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
4582 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
4585 dim->state = DIM_START_MEASURE;
4588 static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
4590 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
4591 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
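/* TQPs are mapped to interrupt vectors round-robin: TQP i is handled by
 * vector (i % vector_num), so with 8 TQPs and 4 vectors, vector 0 serves
 * TQPs 0 and 4, vector 1 serves TQPs 1 and 5, and so on. Each vector gets
 * the TQP's tx ring (priv->ring[i]) and rx ring (priv->ring[i + tqp_num]).
 */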
4594 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
4596 struct hnae3_handle *h = priv->ae_handle;
4597 struct hns3_enet_tqp_vector *tqp_vector;
4601 hns3_nic_set_cpumask(priv);
4603 for (i = 0; i < priv->vector_num; i++) {
4604 tqp_vector = &priv->tqp_vector[i];
4605 hns3_vector_coalesce_init_hw(tqp_vector, priv);
4606 tqp_vector->num_tqps = 0;
4607 hns3_nic_init_dim(tqp_vector);
4610 for (i = 0; i < h->kinfo.num_tqps; i++) {
4611 u16 vector_i = i % priv->vector_num;
4612 u16 tqp_num = h->kinfo.num_tqps;
4614 tqp_vector = &priv->tqp_vector[vector_i];
4616 hns3_add_ring_to_group(&tqp_vector->tx_group,
4617 &priv->ring[i]);
4619 hns3_add_ring_to_group(&tqp_vector->rx_group,
4620 &priv->ring[i + tqp_num]);
4622 priv->ring[i].tqp_vector = tqp_vector;
4623 priv->ring[i + tqp_num].tqp_vector = tqp_vector;
4624 tqp_vector->num_tqps++;
4627 for (i = 0; i < priv->vector_num; i++) {
4628 struct hnae3_ring_chain_node *vector_ring_chain;
4630 tqp_vector = &priv->tqp_vector[i];
4632 tqp_vector->rx_group.total_bytes = 0;
4633 tqp_vector->rx_group.total_packets = 0;
4634 tqp_vector->tx_group.total_bytes = 0;
4635 tqp_vector->tx_group.total_packets = 0;
4636 tqp_vector->handle = h;
4638 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4639 if (!vector_ring_chain) {
4640 ret = -ENOMEM;
4641 goto map_ring_fail;
4642 }
4644 ret = h->ae_algo->ops->map_ring_to_vector(h,
4645 tqp_vector->vector_irq, vector_ring_chain);
4647 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4649 if (ret)
4650 goto map_ring_fail;
4652 netif_napi_add(priv->netdev, &tqp_vector->napi,
4653 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
4654 }
4656 return 0;
4658 map_ring_fail:
4659 while (i--)
4660 netif_napi_del(&priv->tqp_vector[i].napi);
4662 return ret;
4663 }
4665 static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
4667 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
4668 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
4669 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
4671 /* Initialize the configuration for interrupt coalescing:
4672 * 1. GL (Interrupt Gap Limiter)
4673 * 2. RL (Interrupt Rate Limiter)
4674 * 3. QL (Interrupt Quantity Limiter)
4675 *
4676 * Default: enable self-adaptive interrupt coalescing and GL.
4677 */
4678 tx_coal->adapt_enable = 1;
4679 rx_coal->adapt_enable = 1;
4681 tx_coal->int_gl = HNS3_INT_GL_50K;
4682 rx_coal->int_gl = HNS3_INT_GL_50K;
4684 rx_coal->flow_level = HNS3_FLOW_LOW;
4685 tx_coal->flow_level = HNS3_FLOW_LOW;
4687 if (ae_dev->dev_specs.int_ql_max) {
4688 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
4689 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
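/* Vector allocation: the driver asks for min(num_online_cpus(), tqp_num)
 * vectors and the ae_algo get_vector() op reports how many are actually
 * available, which may be fewer; priv->vector_num reflects that final count.
 */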
4693 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
4695 struct hnae3_handle *h = priv->ae_handle;
4696 struct hns3_enet_tqp_vector *tqp_vector;
4697 struct hnae3_vector_info *vector;
4698 struct pci_dev *pdev = h->pdev;
4699 u16 tqp_num = h->kinfo.num_tqps;
4700 u16 vector_num;
4701 int ret = 0;
4702 int i;
4704 /* RSS size, cpu online and vector_num should be the same */
4705 /* Should consider 2p/4p later */
4706 vector_num = min_t(u16, num_online_cpus(), tqp_num);
4708 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
4709 GFP_KERNEL);
4710 if (!vector)
4711 return -ENOMEM;
4713 /* save the actual available vector number */
4714 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
4716 priv->vector_num = vector_num;
4717 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
4718 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
4719 GFP_KERNEL);
4720 if (!priv->tqp_vector) {
4721 ret = -ENOMEM;
4722 goto out;
4723 }
4725 for (i = 0; i < priv->vector_num; i++) {
4726 tqp_vector = &priv->tqp_vector[i];
4727 tqp_vector->idx = i;
4728 tqp_vector->mask_addr = vector[i].io_addr;
4729 tqp_vector->vector_irq = vector[i].vector;
4730 hns3_vector_coalesce_init(tqp_vector, priv);
4733 out:
4734 devm_kfree(&pdev->dev, vector);
4735 return ret;
4738 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
4740 group->ring = NULL;
4741 group->count = 0;
4742 }
4744 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
4746 struct hnae3_ring_chain_node *vector_ring_chain;
4747 struct hnae3_handle *h = priv->ae_handle;
4748 struct hns3_enet_tqp_vector *tqp_vector;
4751 for (i = 0; i < priv->vector_num; i++) {
4752 tqp_vector = &priv->tqp_vector[i];
4754 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
4755 continue;
4757 /* Since the mapping can be overwritten, when we fail to get the
4758 * chain between the vector and ring, we should go on to deal with
4759 * the remaining steps.
4760 */
4761 vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
4762 if (!vector_ring_chain)
4763 dev_warn(priv->dev, "failed to get ring chain\n");
4765 h->ae_algo->ops->unmap_ring_from_vector(h,
4766 tqp_vector->vector_irq, vector_ring_chain);
4768 hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
4770 hns3_clear_ring_group(&tqp_vector->rx_group);
4771 hns3_clear_ring_group(&tqp_vector->tx_group);
4772 netif_napi_del(&priv->tqp_vector[i].napi);
4776 static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
4778 struct hnae3_handle *h = priv->ae_handle;
4779 struct pci_dev *pdev = h->pdev;
4782 for (i = 0; i < priv->vector_num; i++) {
4783 struct hns3_enet_tqp_vector *tqp_vector;
4785 tqp_vector = &priv->tqp_vector[i];
4786 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
4787 if (ret)
4788 return;
4789 }
4791 devm_kfree(&pdev->dev, priv->tqp_vector);
4794 static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
4795 unsigned int ring_type)
4797 int queue_num = priv->ae_handle->kinfo.num_tqps;
4798 struct hns3_enet_ring *ring;
4801 if (ring_type == HNAE3_RING_TYPE_TX) {
4802 ring = &priv->ring[q->tqp_index];
4803 desc_num = priv->ae_handle->kinfo.num_tx_desc;
4804 ring->queue_index = q->tqp_index;
4805 ring->tx_copybreak = priv->tx_copybreak;
4806 ring->last_to_use = 0;
4807 } else {
4808 ring = &priv->ring[q->tqp_index + queue_num];
4809 desc_num = priv->ae_handle->kinfo.num_rx_desc;
4810 ring->queue_index = q->tqp_index;
4811 ring->rx_copybreak = priv->rx_copybreak;
4812 }
4814 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
4816 ring->tqp = q;
4817 ring->desc = NULL;
4818 ring->desc_cb = NULL;
4819 ring->dev = priv->dev;
4820 ring->desc_dma_addr = 0;
4821 ring->buf_size = q->buf_size;
4822 ring->desc_num = desc_num;
4823 ring->next_to_use = 0;
4824 ring->next_to_clean = 0;
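/* Ring array layout: priv->ring[0 .. num_tqps - 1] are the tx rings and
 * priv->ring[num_tqps .. 2 * num_tqps - 1] are the rx rings, so a TQP's
 * tx and rx rings are always num_tqps entries apart.
 */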
4827 static void hns3_queue_to_ring(struct hnae3_queue *tqp,
4828 struct hns3_nic_priv *priv)
4830 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
4831 hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
4834 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
4836 struct hnae3_handle *h = priv->ae_handle;
4837 struct pci_dev *pdev = h->pdev;
4840 priv->ring = devm_kzalloc(&pdev->dev,
4841 array3_size(h->kinfo.num_tqps,
4842 sizeof(*priv->ring), 2),
4843 GFP_KERNEL);
4844 if (!priv->ring)
4845 return -ENOMEM;
4847 for (i = 0; i < h->kinfo.num_tqps; i++)
4848 hns3_queue_to_ring(h->kinfo.tqp[i], priv);
4850 return 0;
4853 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
4855 if (!priv->ring)
4856 return;
4858 devm_kfree(priv->dev, priv->ring);
4859 priv->ring = NULL;
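/* The page pool below is sized so that the whole rx ring can be backed by
 * pool pages: pool_size = desc_num * buf_size / page size. For example,
 * with 1024 descriptors, 2048-byte buffers and 4 KiB pages (order 0), this
 * gives 1024 * 2048 / 4096 = 512 pages.
 */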
4862 static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
4864 struct page_pool_params pp_params = {
4865 .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
4866 PP_FLAG_DMA_SYNC_DEV,
4867 .order = hns3_page_order(ring),
4868 .pool_size = ring->desc_num * hns3_buf_size(ring) /
4869 (PAGE_SIZE << hns3_page_order(ring)),
4870 .nid = dev_to_node(ring_to_dev(ring)),
4871 .dev = ring_to_dev(ring),
4872 .dma_dir = DMA_FROM_DEVICE,
4874 .max_len = PAGE_SIZE << hns3_page_order(ring),
4877 ring->page_pool = page_pool_create(&pp_params);
4878 if (IS_ERR(ring->page_pool)) {
4879 dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
4880 PTR_ERR(ring->page_pool));
4881 ring->page_pool = NULL;
4885 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
4889 if (ring->desc_num <= 0 || ring->buf_size <= 0)
4890 return -EINVAL;
4892 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
4893 sizeof(ring->desc_cb[0]), GFP_KERNEL);
4894 if (!ring->desc_cb) {
4895 ret = -ENOMEM;
4896 goto out;
4897 }
4899 ret = hns3_alloc_desc(ring);
4900 if (ret)
4901 goto out_with_desc_cb;
4903 if (!HNAE3_IS_TX_RING(ring)) {
4904 if (page_pool_enabled)
4905 hns3_alloc_page_pool(ring);
4907 ret = hns3_alloc_ring_buffers(ring);
4908 if (ret)
4909 goto out_with_desc;
4910 } else {
4911 hns3_init_tx_spare_buffer(ring);
4912 }
4914 return 0;
4916 out_with_desc:
4917 hns3_free_desc(ring);
4918 out_with_desc_cb:
4919 devm_kfree(ring_to_dev(ring), ring->desc_cb);
4920 ring->desc_cb = NULL;
4921 out:
4922 return ret;
4925 void hns3_fini_ring(struct hns3_enet_ring *ring)
4927 hns3_free_desc(ring);
4928 devm_kfree(ring_to_dev(ring), ring->desc_cb);
4929 ring->desc_cb = NULL;
4930 ring->next_to_clean = 0;
4931 ring->next_to_use = 0;
4932 ring->last_to_use = 0;
4933 ring->pending_buf = 0;
4934 if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
4935 dev_kfree_skb_any(ring->skb);
4936 ring->skb = NULL;
4937 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
4938 struct hns3_tx_spare *tx_spare = ring->tx_spare;
4940 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
4941 DMA_TO_DEVICE);
4942 free_pages((unsigned long)tx_spare->buf,
4943 get_order(tx_spare->len));
4944 devm_kfree(ring_to_dev(ring), tx_spare);
4945 ring->tx_spare = NULL;
4948 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
4949 page_pool_destroy(ring->page_pool);
4950 ring->page_pool = NULL;
4954 static int hns3_buf_size2type(u32 buf_size)
4956 int bd_size_type;
4958 switch (buf_size) {
4959 case 512:
4960 bd_size_type = HNS3_BD_SIZE_512_TYPE;
4961 break;
4962 case 1024:
4963 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
4964 break;
4965 case 2048:
4966 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4967 break;
4968 case 4096:
4969 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
4970 break;
4971 default:
4972 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
4973 }
4975 return bd_size_type;
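/* hns3_init_ring_hw() programs the ring's DMA base address, buffer size
 * type and descriptor count into the queue registers. Note the BD number
 * register holds desc_num / 8 - 1, e.g. 1024 descriptors are written as
 * 1024 / 8 - 1 = 127.
 */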
4978 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
4980 dma_addr_t dma = ring->desc_dma_addr;
4981 struct hnae3_queue *q = ring->tqp;
4983 if (!HNAE3_IS_TX_RING(ring)) {
4984 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
4985 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
4986 (u32)((dma >> 31) >> 1));
4988 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
4989 hns3_buf_size2type(ring->buf_size));
4990 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
4991 ring->desc_num / 8 - 1);
4992 } else {
4993 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
4994 (u32)dma);
4995 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
4996 (u32)((dma >> 31) >> 1));
4998 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
4999 ring->desc_num / 8 - 1);
5003 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
5005 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5006 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
5009 for (i = 0; i < tc_info->num_tc; i++) {
5012 for (j = 0; j < tc_info->tqp_count[i]; j++) {
5013 struct hnae3_queue *q;
5015 q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
5016 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
5021 int hns3_init_all_ring(struct hns3_nic_priv *priv)
5023 struct hnae3_handle *h = priv->ae_handle;
5024 int ring_num = h->kinfo.num_tqps * 2;
5028 for (i = 0; i < ring_num; i++) {
5029 ret = hns3_alloc_ring_memory(&priv->ring[i]);
5030 if (ret) {
5031 dev_err(priv->dev,
5032 "Alloc ring memory fail! ret=%d\n", ret);
5033 goto out_when_alloc_ring_memory;
5036 u64_stats_init(&priv->ring[i].syncp);
5037 }
5039 return 0;
5041 out_when_alloc_ring_memory:
5042 for (j = i - 1; j >= 0; j--)
5043 hns3_fini_ring(&priv->ring[j]);
5045 return -ENOMEM;
5048 static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
5050 struct hnae3_handle *h = priv->ae_handle;
5053 for (i = 0; i < h->kinfo.num_tqps; i++) {
5054 hns3_fini_ring(&priv->ring[i]);
5055 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
5059 /* Set mac addr if it is configured. or leave it to the AE driver */
5060 static int hns3_init_mac_addr(struct net_device *netdev)
5062 struct hns3_nic_priv *priv = netdev_priv(netdev);
5063 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5064 struct hnae3_handle *h = priv->ae_handle;
5065 u8 mac_addr_temp[ETH_ALEN];
5068 if (h->ae_algo->ops->get_mac_addr)
5069 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
5071 /* Check if the MAC address is valid, if not get a random one */
5072 if (!is_valid_ether_addr(mac_addr_temp)) {
5073 eth_hw_addr_random(netdev);
5074 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
5075 dev_warn(priv->dev, "using random MAC address %s\n",
5076 format_mac_addr);
5077 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
5078 eth_hw_addr_set(netdev, mac_addr_temp);
5079 ether_addr_copy(netdev->perm_addr, mac_addr_temp);
5080 } else {
5081 return 0;
5082 }
5084 if (h->ae_algo->ops->set_mac_addr)
5085 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
5090 static int hns3_init_phy(struct net_device *netdev)
5092 struct hnae3_handle *h = hns3_get_handle(netdev);
5093 int ret = 0;
5095 if (h->ae_algo->ops->mac_connect_phy)
5096 ret = h->ae_algo->ops->mac_connect_phy(h);
5098 return ret;
5101 static void hns3_uninit_phy(struct net_device *netdev)
5103 struct hnae3_handle *h = hns3_get_handle(netdev);
5105 if (h->ae_algo->ops->mac_disconnect_phy)
5106 h->ae_algo->ops->mac_disconnect_phy(h);
5109 static int hns3_client_start(struct hnae3_handle *handle)
5111 if (!handle->ae_algo->ops->client_start)
5112 return 0;
5114 return handle->ae_algo->ops->client_start(handle);
5117 static void hns3_client_stop(struct hnae3_handle *handle)
5119 if (!handle->ae_algo->ops->client_stop)
5120 return;
5122 handle->ae_algo->ops->client_stop(handle);
5125 static void hns3_info_show(struct hns3_nic_priv *priv)
5127 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
5128 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
5130 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
5131 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
5132 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
5133 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
5134 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
5135 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
5136 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
5137 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
5138 dev_info(priv->dev, "Total number of enabled TCs: %u\n",
5139 kinfo->tc_info.num_tc);
5140 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
5143 static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
5144 enum dim_cq_period_mode mode, bool is_tx)
5146 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
5147 struct hnae3_handle *handle = priv->ae_handle;
5148 int i;
5150 if (is_tx) {
5151 priv->tx_cqe_mode = mode;
5153 for (i = 0; i < priv->vector_num; i++)
5154 priv->tqp_vector[i].tx_group.dim.mode = mode;
5155 } else {
5156 priv->rx_cqe_mode = mode;
5158 for (i = 0; i < priv->vector_num; i++)
5159 priv->tqp_vector[i].rx_group.dim.mode = mode;
5160 }
5162 /* Only on device version V3 and above can GL switch between
5163 * CQ and EQ period mode.
5164 */
5165 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
5166 u32 new_mode;
5167 u64 reg;
5169 new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ?
5170 HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE;
5171 reg = is_tx ? HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG;
5173 writel(new_mode, handle->kinfo.io_base + reg);
5177 void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
5178 enum dim_cq_period_mode tx_mode,
5179 enum dim_cq_period_mode rx_mode)
5181 hns3_set_cq_period_mode(priv, tx_mode, true);
5182 hns3_set_cq_period_mode(priv, rx_mode, false);
5185 static void hns3_state_init(struct hnae3_handle *handle)
5187 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
5188 struct net_device *netdev = handle->kinfo.netdev;
5189 struct hns3_nic_priv *priv = netdev_priv(netdev);
5191 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5193 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
5194 set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
5196 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5197 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
5199 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
5200 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
5202 if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev))
5203 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
5206 static int hns3_client_init(struct hnae3_handle *handle)
5208 struct pci_dev *pdev = handle->pdev;
5209 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5210 u16 alloc_tqps, max_rss_size;
5211 struct hns3_nic_priv *priv;
5212 struct net_device *netdev;
5213 int ret;
5215 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
5216 &max_rss_size);
5217 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
5218 if (!netdev)
5219 return -ENOMEM;
5221 priv = netdev_priv(netdev);
5222 priv->dev = &pdev->dev;
5223 priv->netdev = netdev;
5224 priv->ae_handle = handle;
5225 priv->tx_timeout_count = 0;
5226 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
5227 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
5229 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
5231 handle->kinfo.netdev = netdev;
5232 handle->priv = (void *)priv;
5234 hns3_init_mac_addr(netdev);
5236 hns3_set_default_feature(netdev);
5238 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
5239 netdev->priv_flags |= IFF_UNICAST_FLT;
5240 netdev->netdev_ops = &hns3_nic_netdev_ops;
5241 SET_NETDEV_DEV(netdev, &pdev->dev);
5242 hns3_ethtool_set_ops(netdev);
5244 /* Carrier off reporting is important to ethtool even BEFORE open */
5245 netif_carrier_off(netdev);
5247 ret = hns3_get_ring_config(priv);
5248 if (ret) {
5249 ret = -ENOMEM;
5250 goto out_get_ring_cfg;
5251 }
5253 hns3_nic_init_coal_cfg(priv);
5255 ret = hns3_nic_alloc_vector_data(priv);
5256 if (ret) {
5257 ret = -ENOMEM;
5258 goto out_alloc_vector_data;
5259 }
5261 ret = hns3_nic_init_vector_data(priv);
5262 if (ret) {
5263 ret = -ENOMEM;
5264 goto out_init_vector_data;
5265 }
5267 ret = hns3_init_all_ring(priv);
5268 if (ret) {
5269 ret = -ENOMEM;
5270 goto out_init_ring;
5271 }
5273 hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE,
5274 DIM_CQ_PERIOD_MODE_START_FROM_EQE);
5276 ret = hns3_init_phy(netdev);
5277 if (ret)
5278 goto out_init_phy;
5280 /* the device can work without cpu rmap, only aRFS needs it */
5281 ret = hns3_set_rx_cpu_rmap(netdev);
5282 if (ret)
5283 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5285 ret = hns3_nic_init_irq(priv);
5286 if (ret) {
5287 dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5288 hns3_free_rx_cpu_rmap(netdev);
5289 goto out_init_irq_fail;
5292 ret = hns3_client_start(handle);
5293 if (ret) {
5294 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5295 goto out_client_start;
5298 hns3_dcbnl_setup(handle);
5300 ret = hns3_dbg_init(handle);
5301 if (ret) {
5302 dev_err(priv->dev, "failed to init debugfs, ret = %d\n",
5303 ret);
5304 goto out_client_start;
5307 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
5309 hns3_state_init(handle);
5311 ret = register_netdev(netdev);
5312 if (ret) {
5313 dev_err(priv->dev, "probe register netdev fail!\n");
5314 goto out_reg_netdev_fail;
5317 if (netif_msg_drv(handle))
5318 hns3_info_show(priv);
5320 return ret;
5322 out_reg_netdev_fail:
5323 hns3_dbg_uninit(handle);
5324 out_client_start:
5325 hns3_free_rx_cpu_rmap(netdev);
5326 hns3_nic_uninit_irq(priv);
5327 out_init_irq_fail:
5328 hns3_uninit_phy(netdev);
5329 out_init_phy:
5330 hns3_uninit_all_ring(priv);
5331 out_init_ring:
5332 hns3_nic_uninit_vector_data(priv);
5333 out_init_vector_data:
5334 hns3_nic_dealloc_vector_data(priv);
5335 out_alloc_vector_data:
5336 priv->ring = NULL;
5337 out_get_ring_cfg:
5338 priv->ae_handle = NULL;
5339 free_netdev(netdev);
5340 return ret;
5343 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
5345 struct net_device *netdev = handle->kinfo.netdev;
5346 struct hns3_nic_priv *priv = netdev_priv(netdev);
5348 if (netdev->reg_state != NETREG_UNINITIALIZED)
5349 unregister_netdev(netdev);
5351 hns3_client_stop(handle);
5353 hns3_uninit_phy(netdev);
5355 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5356 netdev_warn(netdev, "already uninitialized\n");
5357 goto out_netdev_free;
5360 hns3_free_rx_cpu_rmap(netdev);
5362 hns3_nic_uninit_irq(priv);
5364 hns3_clear_all_ring(handle, true);
5366 hns3_nic_uninit_vector_data(priv);
5368 hns3_nic_dealloc_vector_data(priv);
5370 hns3_uninit_all_ring(priv);
5372 hns3_put_ring_config(priv);
5374 out_netdev_free:
5375 hns3_dbg_uninit(handle);
5376 free_netdev(netdev);
5379 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
5381 struct net_device *netdev = handle->kinfo.netdev;
5383 if (!netdev)
5384 return;
5386 if (linkup) {
5387 netif_tx_wake_all_queues(netdev);
5388 netif_carrier_on(netdev);
5389 if (netif_msg_link(handle))
5390 netdev_info(netdev, "link up\n");
5391 } else {
5392 netif_carrier_off(netdev);
5393 netif_tx_stop_all_queues(netdev);
5394 if (netif_msg_link(handle))
5395 netdev_info(netdev, "link down\n");
5399 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
5401 while (ring->next_to_clean != ring->next_to_use) {
5402 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
5403 hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
5404 ring_ptr_move_fw(ring, next_to_clean);
5407 ring->pending_buf = 0;
5410 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
5412 struct hns3_desc_cb res_cbs;
5415 while (ring->next_to_use != ring->next_to_clean) {
5416 /* When a buffer is not reused, its memory has been
5417 * freed in hns3_handle_rx_bd or will be freed by
5418 * stack, so we need to replace the buffer here.
5419 */
5420 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5421 ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
5422 if (ret) {
5423 hns3_ring_stats_update(ring, sw_err_cnt);
5424 /* If allocating a new buffer fails, exit directly; the ring
5425 * will be cleared again in the up flow.
5426 */
5427 netdev_warn(ring_to_netdev(ring),
5428 "reserve buffer map failed, ret = %d\n",
5432 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
5434 ring_ptr_move_fw(ring, next_to_use);
5437 /* Free the pending skb in rx ring */
5438 if (ring->skb) {
5439 dev_kfree_skb_any(ring->skb);
5440 ring->skb = NULL;
5441 ring->pending_buf = 0;
5442 }
5444 return 0;
5447 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
5449 while (ring->next_to_use != ring->next_to_clean) {
5450 /* When a buffer is not reused, its memory has been
5451 * freed in hns3_handle_rx_bd or will be freed by
5452 * stack, so only need to unmap the buffer here.
5453 */
5454 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
5455 hns3_unmap_buffer(ring,
5456 &ring->desc_cb[ring->next_to_use]);
5457 ring->desc_cb[ring->next_to_use].dma = 0;
5460 ring_ptr_move_fw(ring, next_to_use);
5464 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
5466 struct net_device *ndev = h->kinfo.netdev;
5467 struct hns3_nic_priv *priv = netdev_priv(ndev);
5470 for (i = 0; i < h->kinfo.num_tqps; i++) {
5471 struct hns3_enet_ring *ring;
5473 ring = &priv->ring[i];
5474 hns3_clear_tx_ring(ring);
5476 ring = &priv->ring[i + h->kinfo.num_tqps];
5477 /* Continue to clear other rings even if clearing some
5478 * of them fails.
5479 */
5480 if (force)
5481 hns3_force_clear_rx_ring(ring);
5482 else
5483 hns3_clear_rx_ring(ring);
5487 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
5489 struct net_device *ndev = h->kinfo.netdev;
5490 struct hns3_nic_priv *priv = netdev_priv(ndev);
5491 struct hns3_enet_ring *rx_ring;
5495 ret = h->ae_algo->ops->reset_queue(h);
5496 if (ret)
5497 return ret;
5499 for (i = 0; i < h->kinfo.num_tqps; i++) {
5500 hns3_init_ring_hw(&priv->ring[i]);
5502 /* We need to clear tx ring here because self test will
5503 * use the ring and will not run down before up
5505 hns3_clear_tx_ring(&priv->ring[i]);
5506 priv->ring[i].next_to_clean = 0;
5507 priv->ring[i].next_to_use = 0;
5508 priv->ring[i].last_to_use = 0;
5510 rx_ring = &priv->ring[i + h->kinfo.num_tqps];
5511 hns3_init_ring_hw(rx_ring);
5512 ret = hns3_clear_rx_ring(rx_ring);
5513 if (ret)
5514 return ret;
5516 /* We cannot know the hardware head and tail when this
5517 * function is called in the reset flow, so we reuse all desc.
5518 */
5519 for (j = 0; j < rx_ring->desc_num; j++)
5520 hns3_reuse_buffer(rx_ring, j);
5522 rx_ring->next_to_clean = 0;
5523 rx_ring->next_to_use = 0;
5526 hns3_init_tx_ring_tc(priv);
5528 return 0;
5531 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
5533 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5534 struct net_device *ndev = kinfo->netdev;
5535 struct hns3_nic_priv *priv = netdev_priv(ndev);
5537 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
5538 return 0;
5540 if (!netif_running(ndev))
5541 return 0;
5543 return hns3_nic_net_stop(ndev);
5546 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
5548 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5549 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
5552 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5553 netdev_err(kinfo->netdev, "device is not initialized yet\n");
5554 return -EFAULT;
5555 }
5557 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5559 if (netif_running(kinfo->netdev)) {
5560 ret = hns3_nic_net_open(kinfo->netdev);
5561 if (ret) {
5562 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
5563 netdev_err(kinfo->netdev,
5564 "net up fail, ret=%d!\n", ret);
5572 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
5574 struct net_device *netdev = handle->kinfo.netdev;
5575 struct hns3_nic_priv *priv = netdev_priv(netdev);
5578 /* Carrier off reporting is important to ethtool even BEFORE open */
5579 netif_carrier_off(netdev);
5581 ret = hns3_get_ring_config(priv);
5582 if (ret)
5583 return ret;
5585 ret = hns3_nic_alloc_vector_data(priv);
5586 if (ret)
5587 goto err_put_ring;
5589 ret = hns3_nic_init_vector_data(priv);
5590 if (ret)
5591 goto err_dealloc_vector;
5593 ret = hns3_init_all_ring(priv);
5594 if (ret)
5595 goto err_uninit_vector;
5597 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode);
5599 /* the device can work without cpu rmap, only aRFS needs it */
5600 ret = hns3_set_rx_cpu_rmap(netdev);
5601 if (ret)
5602 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
5604 ret = hns3_nic_init_irq(priv);
5605 if (ret) {
5606 dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
5607 hns3_free_rx_cpu_rmap(netdev);
5608 goto err_init_irq_fail;
5611 if (!hns3_is_phys_func(handle->pdev))
5612 hns3_init_mac_addr(netdev);
5614 ret = hns3_client_start(handle);
5615 if (ret) {
5616 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
5617 goto err_client_start_fail;
5620 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
5622 return ret;
5624 err_client_start_fail:
5625 hns3_free_rx_cpu_rmap(netdev);
5626 hns3_nic_uninit_irq(priv);
5627 err_init_irq_fail:
5628 hns3_uninit_all_ring(priv);
5629 err_uninit_vector:
5630 hns3_nic_uninit_vector_data(priv);
5631 err_dealloc_vector:
5632 hns3_nic_dealloc_vector_data(priv);
5633 err_put_ring:
5634 hns3_put_ring_config(priv);
5636 return ret;
5639 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
5641 struct net_device *netdev = handle->kinfo.netdev;
5642 struct hns3_nic_priv *priv = netdev_priv(netdev);
5644 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
5645 netdev_warn(netdev, "already uninitialized\n");
5646 return 0;
5647 }
5649 hns3_free_rx_cpu_rmap(netdev);
5650 hns3_nic_uninit_irq(priv);
5651 hns3_clear_all_ring(handle, true);
5652 hns3_reset_tx_queue(priv->ae_handle);
5654 hns3_nic_uninit_vector_data(priv);
5656 hns3_nic_dealloc_vector_data(priv);
5658 hns3_uninit_all_ring(priv);
5660 hns3_put_ring_config(priv);
5662 return 0;
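/* hns3_reset_notify() dispatches the reset stages coming from the ae_algo
 * layer: DOWN stops the netdev, UNINIT tears down rings/vectors/irqs, INIT
 * rebuilds them and UP reopens the netdev. hns3_set_channels() relies on
 * the same sequence (DOWN, UNINIT, then INIT and UP via
 * hns3_change_channels()) to change the number of queues.
 */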
5665 int hns3_reset_notify(struct hnae3_handle *handle,
5666 enum hnae3_reset_notify_type type)
5668 int ret = 0;
5670 switch (type) {
5671 case HNAE3_UP_CLIENT:
5672 ret = hns3_reset_notify_up_enet(handle);
5673 break;
5674 case HNAE3_DOWN_CLIENT:
5675 ret = hns3_reset_notify_down_enet(handle);
5676 break;
5677 case HNAE3_INIT_CLIENT:
5678 ret = hns3_reset_notify_init_enet(handle);
5679 break;
5680 case HNAE3_UNINIT_CLIENT:
5681 ret = hns3_reset_notify_uninit_enet(handle);
5682 break;
5683 default:
5684 break;
5685 }
5687 return ret;
5690 static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
5691 bool rxfh_configured)
5695 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
5696 rxfh_configured);
5697 if (ret) {
5698 dev_err(&handle->pdev->dev,
5699 "Change tqp num(%u) fail.\n", new_tqp_num);
5700 return ret;
5701 }
5703 ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
5704 if (ret)
5705 return ret;
5707 ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
5708 if (ret)
5709 hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
5711 return ret;
5714 int hns3_set_channels(struct net_device *netdev,
5715 struct ethtool_channels *ch)
5717 struct hnae3_handle *h = hns3_get_handle(netdev);
5718 struct hnae3_knic_private_info *kinfo = &h->kinfo;
5719 bool rxfh_configured = netif_is_rxfh_configured(netdev);
5720 u32 new_tqp_num = ch->combined_count;
5721 u16 org_tqp_num;
5722 int ret;
5724 if (hns3_nic_resetting(netdev))
5725 return -EBUSY;
5727 if (ch->rx_count || ch->tx_count)
5728 return -EINVAL;
5730 if (kinfo->tc_info.mqprio_active) {
5731 dev_err(&netdev->dev,
5732 "it's not allowed to set channels via ethtool when MQPRIO mode is on\n");
5736 if (new_tqp_num > hns3_get_max_available_channels(h) ||
5738 dev_err(&netdev->dev,
5739 "Change tqps fail, the tqp range is from 1 to %u",
5740 hns3_get_max_available_channels(h));
5741 return -EINVAL;
5742 }
5744 if (kinfo->rss_size == new_tqp_num)
5745 return 0;
5747 netif_dbg(h, drv, netdev,
5748 "set channels: tqp_num=%u, rxfh=%d\n",
5749 new_tqp_num, rxfh_configured);
5751 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
5752 if (ret)
5753 return ret;
5755 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
5756 if (ret)
5757 return ret;
5759 org_tqp_num = h->kinfo.num_tqps;
5760 ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
5761 if (ret) {
5762 int ret1;
5764 netdev_warn(netdev,
5765 "Change channels fail, revert to old value\n");
5766 ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
5767 if (ret1) {
5768 netdev_warn(netdev,
5769 "revert to old channel fail\n");
5770 return ret1;
5771 }
5773 return ret;
5774 }
5776 return 0;
5779 static const struct hns3_hw_error_info hns3_hw_err[] = {
5780 { .type = HNAE3_PPU_POISON_ERROR,
5781 .msg = "PPU poison" },
5782 { .type = HNAE3_CMDQ_ECC_ERROR,
5783 .msg = "IMP CMDQ error" },
5784 { .type = HNAE3_IMP_RD_POISON_ERROR,
5785 .msg = "IMP RD poison" },
5786 { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
5787 .msg = "ROCEE AXI RESP error" },
5790 static void hns3_process_hw_error(struct hnae3_handle *handle,
5791 enum hnae3_hw_error_type type)
5795 for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
5796 if (hns3_hw_err[i].type == type) {
5797 dev_err(&handle->pdev->dev, "Detected %s!\n",
5798 hns3_hw_err[i].msg);
5804 static const struct hnae3_client_ops client_ops = {
5805 .init_instance = hns3_client_init,
5806 .uninit_instance = hns3_client_uninit,
5807 .link_status_change = hns3_link_status_change,
5808 .reset_notify = hns3_reset_notify,
5809 .process_hw_error = hns3_process_hw_error,
5812 /* hns3_init_module - Driver registration routine
5813 * hns3_init_module is the first routine called when the driver is
5814 * loaded. All it does is register with the PCI subsystem.
5815 */
5816 static int __init hns3_init_module(void)
5820 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
5821 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
5823 client.type = HNAE3_CLIENT_KNIC;
5824 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
5825 hns3_driver_name);
5827 client.ops = &client_ops;
5829 INIT_LIST_HEAD(&client.node);
5831 hns3_dbg_register_debugfs(hns3_driver_name);
5833 ret = hnae3_register_client(&client);
5834 if (ret)
5835 goto err_reg_client;
5837 ret = pci_register_driver(&hns3_driver);
5838 if (ret)
5839 goto err_reg_driver;
5841 return ret;
5843 err_reg_driver:
5844 hnae3_unregister_client(&client);
5845 err_reg_client:
5846 hns3_dbg_unregister_debugfs();
5847 return ret;
5849 module_init(hns3_init_module);
5851 /* hns3_exit_module - Driver exit cleanup routine
5852 * hns3_exit_module is called just before the driver is removed
5853 * from memory.
5854 */
5855 static void __exit hns3_exit_module(void)
5857 pci_unregister_driver(&hns3_driver);
5858 hnae3_unregister_client(&client);
5859 hns3_dbg_unregister_debugfs();
5861 module_exit(hns3_exit_module);
5863 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
5864 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
5865 MODULE_LICENSE("GPL");
5866 MODULE_ALIAS("pci:hns-nic");