/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
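
/* Illustration of the helpers above (not called anywhere; shown only to
 * document the convention): each stats string table below pairs an ethtool
 * name with a byte offset into the corresponding stats structure, and
 * HCLGE_STATS_READ() dereferences that offset as a u64, e.g.:
 *
 *	u64 val = HCLGE_STATS_READ(&hdev->hw_stats.mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num));
 */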

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}
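
/* Note on the descriptor layout consumed by hclge_64_bit_update_stats()
 * above: the counters come back in a chain of HCLGE_64_BIT_CMD_NUM
 * descriptors. Only the first descriptor keeps its command header apart
 * from the payload, so it contributes one less 64-bit value
 * (HCLGE_64_BIT_RTN_DATANUM - 1); the remaining descriptors are read as
 * raw counter payload from their base address.
 */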

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one TX and one RX counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return (u8 *)buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		}
		count++;
		handle->flags |= HNAE3_SUPPORT_SERDES_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
			hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
					HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
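
/* Firmware speed codes decoded by hclge_parse_speed() above:
 * 0 -> 1G, 1 -> 10G, 2 -> 25G, 3 -> 40G, 4 -> 50G, 5 -> 100G,
 * 6 -> 10M, 7 -> 100M. hclge_cfg_mac_speed_dup() below writes the same
 * encoding back to the hardware.
 */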

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
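
	/* The 48-bit MAC address arrives split across two config words:
	 * param[2] carries the low 32 bits and the field extracted from
	 * param[3] carries the high 16 bits. The (<< 31) << 1 above is a
	 * << 32 written in two steps. For example, param[2] = 0x44332211
	 * with a high field of 0x6655 yields 11:22:33:44:55:66 after the
	 * byte-extraction loop below.
	 */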

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is expressed in units of 4 bytes when
		 * sent to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently, non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
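
/* Note: hclge_config_tso() above packs both the minimum and the maximum
 * MSS using the HCLGE_TSO_MSS_MIN_M/_S mask pair, as in the original
 * code. Each value lands in its own 16-bit field (tso_mss_min and
 * tso_mss_max), so the shared mask presumably describes the same bit
 * layout within either field.
 */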

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}
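
/* Worked example for hclge_knic_setup() above: with num_tqps = 16,
 * tm_info.num_tc = 4 and rss_size_max >= 4, kinfo->num_tc = 4,
 * kinfo->rss_size = min(rss_size_max, 16 / 4) = 4 and kinfo->num_tqps =
 * 4 * 4 = 16; each enabled TC then owns a contiguous block of rss_size
 * queues starting at tc * rss_size.
 */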

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}
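
/* Summary of the check in hclge_is_rx_buf_ok() above: the required
 * shared buffer is shared_std = max(2 * mps + DV, pfc_num * mps +
 * (tc_num - pfc_num) * mps / 2 + mps), where DV is HCLGE_DEFAULT_DV
 * (or the non-DCB variant). The layout is accepted only when rx_all
 * exceeds rx_priv + shared_std; the remainder rx_all - rx_priv then
 * becomes the shared buffer, with per-TC thresholds of low = mps and
 * high = 2 * mps for PFC-enabled TCs.
 */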

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						 HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the number of pfc disabled TCs, which still have a private
	 * buffer
	 */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}
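
/* The four steps above form a degradation ladder: full watermarks for
 * every enabled TC first, then shrunken watermarks, then dropping the
 * private buffers of PFC-disabled TCs (last TC first), and finally
 * dropping the private buffers of PFC-enabled TCs, until
 * hclge_is_rx_buf_ok() accepts the layout or -ENOMEM is returned.
 */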

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}
1884 struct hclge_pkt_buf_alloc *buf_alloc)
1886 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1887 struct hclge_rx_com_thrd *req;
1888 struct hclge_desc desc[2];
1889 struct hclge_tc_thrd *tc;
1893 for (i = 0; i < 2; i++) {
1894 hclge_cmd_setup_basic_desc(&desc[i],
1895 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1896 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1898 /* The first descriptor set the NEXT bit to 1 */
1900 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1902 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1904 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1905 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1907 req->com_thrd[j].high =
1908 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1909 req->com_thrd[j].high |=
1910 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1911 req->com_thrd[j].low =
1912 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1913 req->com_thrd[j].low |=
1914 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1918 /* Send 2 descriptors at one time */
1919 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1921 dev_err(&hdev->pdev->dev,
1922 "common threshold config cmd failed %d\n", ret);

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}
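
/* Resulting vector layout from hclge_init_msi() above: the NIC MSI/MSI-X
 * vectors come first, starting at base_msi_vector, and the RoCE vectors
 * follow at base_msi_vector + HCLGE_ROCE_VECTOR_OFFSET, matching the
 * "NIC vectors are queued before Roce vectors" rule noted in
 * hclge_query_pf_resource().
 */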

static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}
2231 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2233 struct hclge_vport *vport = hclge_get_vport(handle);
2234 struct hclge_dev *hdev = vport->back;
2236 return hclge_set_autoneg_en(hdev, enable);
2239 static int hclge_get_autoneg(struct hnae3_handle *handle)
2241 struct hclge_vport *vport = hclge_get_vport(handle);
2242 struct hclge_dev *hdev = vport->back;
2243 struct phy_device *phydev = hdev->hw.mac.phydev;
2246 return phydev->autoneg;
2248 return hdev->hw.mac.autoneg;
2251 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
2255 struct hclge_mac_vlan_mask_entry_cmd *req;
2256 struct hclge_desc desc;
2259 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
2260 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
2262 hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
2264 ether_addr_copy(req->mac_mask, mac_mask);
2266 status = hclge_cmd_send(&hdev->hw, &desc, 1);
2268 dev_err(&hdev->pdev->dev,
2269 "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
2275 static int hclge_mac_init(struct hclge_dev *hdev)
2277 struct hnae3_handle *handle = &hdev->vport[0].nic;
2278 struct net_device *netdev = handle->kinfo.netdev;
2279 struct hclge_mac *mac = &hdev->hw.mac;
2280 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2281 struct hclge_vport *vport;
2286 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
2288 dev_err(&hdev->pdev->dev,
2289 "Config mac speed dup fail ret=%d\n", ret);
2295 /* Initialize the MTA table work mode */
2296 hdev->enable_mta = true;
2297 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;
2299 ret = hclge_set_mta_filter_mode(hdev,
2300 hdev->mta_mac_sel_type,
2303 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
2308 for (i = 0; i < hdev->num_alloc_vport; i++) {
2309 vport = &hdev->vport[i];
2310 vport->accept_mta_mc = false;
2312 memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow));
2313 ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false);
2315 dev_err(&hdev->pdev->dev,
2316 "set mta filter mode fail ret=%d\n", ret);
2321 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
2323 dev_err(&hdev->pdev->dev,
2324 "set default mac_vlan_mask fail ret=%d\n", ret);
2333 ret = hclge_set_mtu(handle, mtu);
2335 dev_err(&hdev->pdev->dev,
2336 "set mtu failed ret=%d\n", ret);
2343 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2345 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2346 schedule_work(&hdev->mbx_service_task);
2349 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2351 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2352 schedule_work(&hdev->rst_service_task);
2355 static void hclge_task_schedule(struct hclge_dev *hdev)
2357 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2358 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2359 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2360 (void)schedule_work(&hdev->service_task);
2363 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2365 struct hclge_link_status_cmd *req;
2366 struct hclge_desc desc;
2370 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2373 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2378 req = (struct hclge_link_status_cmd *)desc.data;
2379 link_status = req->status & HCLGE_LINK_STATUS;
2381 return !!link_status;
2384 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2389 mac_state = hclge_get_mac_link_status(hdev);
2391 if (hdev->hw.mac.phydev) {
2392 if (!genphy_read_status(hdev->hw.mac.phydev))
2393 link_stat = mac_state &
2394 hdev->hw.mac.phydev->link;
2399 link_stat = mac_state;
2405 static void hclge_update_link_status(struct hclge_dev *hdev)
2407 struct hnae3_client *client = hdev->nic_client;
2408 struct hnae3_handle *handle;
2414 state = hclge_get_mac_phy_link(hdev);
2415 if (state != hdev->hw.mac.link) {
2416 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2417 handle = &hdev->vport[i].nic;
2418 client->ops->link_status_change(handle, state);
2420 hdev->hw.mac.link = state;
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as the autoneg result from the mac cmd
	 * when the phy doesn't exist
	 */
	if (mac.phydev || !mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}

static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw_stats.stats_timer++;
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg;
	u32 cmdq_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);

	/* Assumption: if by any chance reset and mailbox events are reported
	 * together then we will only process the reset event in this go and
	 * defer the processing of the mailbox events. Since we would not have
	 * cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 */

	/* check for vector0 reset event sources */
	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 event_cause;
	u32 clearval;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_reset_task_schedule(hdev);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
		hclge_mbx_task_schedule(hdev);
		break;
	default:
		dev_warn(&hdev->pdev->dev,
			 "received unknown or unhandled event of vector0\n");
		break;
	}

	/* clear the source of interrupt if it is not caused by reset */
	if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
		hclge_clear_event_cause(hdev, event_cause, clearval);
		hclge_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static void hclge_get_misc_vector(struct hclge_dev *hdev)
{
	struct hclge_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);

	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
	hdev->vector_status[0] = 0;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
	int ret;

	hclge_get_misc_vector(hdev);

	/* this would be explicitly freed in the end */
	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
			  0, "hclge_misc", hdev);
	if (ret) {
		hclge_free_vector(hdev, 0);
		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
			hdev->misc_vector.vector_irq);
	}

	return ret;
}

static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
{
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclge_free_vector(hdev, 0);
}

static int hclge_notify_client(struct hclge_dev *hdev,
			       enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	u16 i;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		struct hnae3_handle *handle = &hdev->vport[i].nic;
		int ret;

		ret = client->ops->reset_notify(handle, type);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WAIT_MS	100
#define HCLGE_RESET_WAIT_CNT	5
	u32 val, reg, reg_bit;
	u32 cnt = 0;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_GLOBAL_RESET_BIT;
		break;
	case HNAE3_CORE_RESET:
		reg = HCLGE_GLOBAL_RESET_REG;
		reg_bit = HCLGE_CORE_RESET_BIT;
		break;
	case HNAE3_FUNC_RESET:
		reg = HCLGE_FUN_RST_ING;
		reg_bit = HCLGE_FUN_RST_ING_B;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}

	val = hclge_read_dev(&hdev->hw, reg);
	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
		val = hclge_read_dev(&hdev->hw, reg);
		cnt++;
	}

	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status =%d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_CORE_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Core Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		hclge_func_reset_cmd(hdev, 0);
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_GLOBAL_RESET, addr))
		rst_level = HNAE3_GLOBAL_RESET;
	else if (test_bit(HNAE3_CORE_RESET, addr))
		rst_level = HNAE3_CORE_RESET;
	else if (test_bit(HNAE3_IMP_RESET, addr))
		rst_level = HNAE3_IMP_RESET;
	else if (test_bit(HNAE3_FUNC_RESET, addr))
		rst_level = HNAE3_FUNC_RESET;

	/* now, clear all other resets */
	clear_bit(HNAE3_GLOBAL_RESET, addr);
	clear_bit(HNAE3_CORE_RESET, addr);
	clear_bit(HNAE3_IMP_RESET, addr);
	clear_bit(HNAE3_FUNC_RESET, addr);

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	case HNAE3_CORE_RESET:
		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;

	/* perform reset of the stack & ae device for a client */
	handle = &hdev->vport[0].nic;
	rtnl_lock();
	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	if (!hclge_reset_wait(hdev)) {
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		hclge_clear_reset_cause(hdev);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	handle->last_reset_time = jiffies;
	rtnl_unlock();
}
static void hclge_reset_event(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog hit
	 * us again. We know this if the last reset request did not occur very
	 * recently (watchdog timer = 5*HZ, so check after a sufficiently
	 * large time, say 4*5*HZ). In case of a new request we reset the
	 * "reset level" to PF reset. And if it is a repeat of the most recent
	 * request then we want to make sure we throttle it, so we will not
	 * allow it again before 3*HZ has passed.
	 */
	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
		return;
	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
		handle->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 handle->reset_level);

	/* request reset & schedule reset task */
	set_bit(handle->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (handle->reset_level < HNAE3_GLOBAL_RESET)
		handle->reset_level++;
}
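/* Illustrative timeline for the throttling above (assuming the 5*HZ
 * watchdog mentioned in the comment): a request at t=0 schedules a PF
 * reset; a repeat before t+3s is silently dropped; each accepted repeat
 * escalates reset_level one step, capped at HNAE3_GLOBAL_RESET; and once
 * no request has been seen for 4*5*HZ (20s) the next request starts over
 * at HNAE3_FUNC_RESET.
 */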
static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_reset_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, rst_service_task);

	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
}

static void hclge_mailbox_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, mbx_service_task);

	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
		hclge_update_stats_for_all(hdev);
		hdev->hw_stats.stats_timer = 0;
	}

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_service_complete(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}
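/* Address math example for the loop above: hw vector i = 3 of vport 0
 * gets io_addr = io_base + HCLGE_VECTOR_REG_BASE +
 * 2 * HCLGE_VECTOR_REG_OFFSET. The search starts at i = 1 because
 * vector 0 is reserved for the misc (reset/mailbox) interrupt.
 */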
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);

	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = vport->rss_algo;

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
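/* Mapping example for the helper above: "ethtool -N <dev> rx-flow-hash
 * tcp4 sdfn" sets RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
 * which folds into HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT; SCTP flows additionally hash HCLGE_V_TAG_BIT
 * (presumably the SCTP verification tag).
 */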
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	int i, ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}
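/* Worked example for the tc_size math above: rss_size = 24 gives
 * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so every enabled TC is
 * programmed with tc_size = 5 (a 32-entry window) while tc_offset still
 * advances in steps of the real rss_size, 24.
 */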
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;

		netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req
		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}
static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
				    int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector_id =%d\n", vector_id);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
				       int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id,
			ret);

	return ret;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * the firmware on revision(0x20).
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);

	return ret;
}
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				   bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
				 vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}

static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}

static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
{
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;
	u32 loop_en;
	int ret;

	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config at first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac loopback get fail, ret =%d.\n", ret);
		return ret;
	}

	/* 2 Then setup the loopback flag */
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);

	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
	hclge_cmd_reuse_desc(&desc, false);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac loopback set fail, ret =%d.\n", ret);
	return ret;
}

static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		ret = hclge_set_mac_loopback(hdev, en);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}

static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, true);

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);
	hdev->hw.mac.link = 0;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
		hclge_mac_stop_phy(hdev);
		return;
	}

	for (i = 0; i < vport->alloc_tqps; i++)
		hclge_tqp_enable(hdev, i, 0, false);

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
	del_timer_sync(&hdev->service_timer);
	cancel_work_sync(&hdev->service_task);
	hclge_update_link_status(hdev);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -ENOSPC;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -ENOENT;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EINVAL;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}
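/* Example for the split above: vfids 0-191 live in desc[1] and
 * vfids 192-255 in desc[2]. vfid 200 therefore lands in desc[2],
 * word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */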
static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	/* the function id bitmap lives in desc[1] and desc[2]; desc[0] holds
	 * the looked-up entry itself, so start checking from desc[1]
	 */
	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}
static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
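/* Byte-order example for the packing above: the MAC 00:11:22:33:44:55
 * yields high_val = 0x33221100 (bytes 3..0) and low_val = 0x5544
 * (bytes 5..4) before the little-endian conversion.
 */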
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
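/* Index example: with mta_mac_sel_type = HCLGE_MAC_ADDR_47_36 (rsh = 4,
 * assuming that enum value is 0), the slot is bits 47..36 of the
 * address, i.e. the top 12 bits of addr[0]/addr[1]. For an IPv6
 * multicast MAC starting 33:33:..., high_val = 0x3333 and the MTA index
 * is 0x333.
 */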
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		      enable);
	hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
			HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}
int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		      enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);

	return ret;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
			HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	if (enable)
		set_bit(idx, vport->mta_shadow);
	else
		clear_bit(idx, vport->mta_shadow);

	return 0;
}

static int hclge_update_mta_status(struct hnae3_handle *handle)
{
	unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct net_device *netdev = handle->kinfo.netdev;
	struct netdev_hw_addr *ha;
	u16 tbl_idx;

	memset(mta_status, 0, sizeof(mta_status));

	/* update mta_status from mc addr list */
	netdev_for_each_mc_addr(ha, netdev) {
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr);
		set_bit(tbl_idx, mta_status);
	}

	return hclge_update_mta_status_common(vport, mta_status,
					      0, HCLGE_MTA_TBL_SIZE, true);
}

int hclge_update_mta_status_common(struct hclge_vport *vport,
				   unsigned long *status,
				   u16 idx,
				   u16 count,
				   bool update_filter)
{
	struct hclge_dev *hdev = vport->back;
	u16 update_max = idx + count;
	u16 check_max;
	int ret = 0;
	bool used;
	u16 i;

	/* setup mta check range */
	if (update_filter) {
		i = 0;
		check_max = HCLGE_MTA_TBL_SIZE;
	} else {
		i = idx;
		check_max = update_max;
	}

	used = false;
	/* check and update all mta item */
	for (; i < check_max; i++) {
		/* ignore unused item */
		if (!test_bit(i, vport->mta_shadow))
			continue;

		/* if i in update range then update it */
		if (i >= idx && i < update_max)
			if (!test_bit(i - idx, status))
				hclge_set_mta_table_item(vport, i, false);

		if (!used && test_bit(i, vport->mta_shadow))
			used = true;
	}

	/* no longer use mta, disable it */
	if (vport->accept_mta_mc && update_filter && !used) {
		ret = hclge_cfg_func_mta_filter(hdev,
						vport->vport_id,
						false);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"disable func mta filter fail ret=%d\n",
				ret);
		else
			vport->accept_mta_mc = false;
	}

	return ret;
}

static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}

static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			addr,
			is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is absent. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT)
		return hclge_add_mac_vlan_tbl(vport, &req, NULL);

	/* check if we just hit the duplicate */
	if (!ret)
		ret = -EINVAL;

	dev_err(&hdev->pdev->dev,
		"PF failed to add unicast entry(%pM) in the MAC table\n",
		addr);

	return ret;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* If mc mac vlan table is full, use MTA table */
	if (status == -ENOSPC) {
		if (!vport->accept_mta_mc) {
			status = hclge_cfg_func_mta_filter(hdev,
							   vport->vport_id,
							   true);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"set mta filter mode fail ret=%d\n",
					status);
				return status;
			}
			vport->accept_mta_mc = true;
		}

		/* Set MTA table for this MAC address */
		tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
		status = hclge_set_mta_table_item(vport, tbl_idx, true);
	}

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All vfids are zero, so we need to delete the entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all vfids are zero, so just update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* Maybe this mac address is in the mta table, but it cannot be
		 * deleted here because an entry of mta represents an address
		 * range rather than a specific address. The delete action on
		 * all entries will take effect in update_mta_status called by
		 * hns3_nic_set_rx_mode.
		 */
		status = 0;
	}

	return status;
}
4445 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
4446 u16 cmdq_resp, u8 resp_code)
4448 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
4449 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
4450 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
4451 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
4456 dev_err(&hdev->pdev->dev,
4457 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
4462 switch (resp_code) {
4463 case HCLGE_ETHERTYPE_SUCCESS_ADD:
4464 case HCLGE_ETHERTYPE_ALREADY_ADD:
4467 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
4468 dev_err(&hdev->pdev->dev,
4469 "add mac ethertype failed for manager table overflow.\n");
4470 return_status = -EIO;
4472 case HCLGE_ETHERTYPE_KEY_CONFLICT:
4473 dev_err(&hdev->pdev->dev,
4474 "add mac ethertype failed for key conflict.\n");
4475 return_status = -EIO;
4478 dev_err(&hdev->pdev->dev,
4479 "add mac ethertype failed for undefined, code=%d.\n",
4481 return_status = -EIO;
4484 return return_status;
4487 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
4488 const struct hclge_mac_mgr_tbl_entry_cmd *req)
4490 struct hclge_desc desc;
4495 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
4496 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
4498 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4500 dev_err(&hdev->pdev->dev,
4501 "add mac ethertype failed for cmd_send, ret =%d.\n",
4506 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
4507 retval = le16_to_cpu(desc.retval);
4509 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
4512 static int init_mgr_tbl(struct hclge_dev *hdev)
4517 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
4518 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
4520 dev_err(&hdev->pdev->dev,
4521 "add mac ethertype failed, ret =%d.\n",
4530 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
4532 struct hclge_vport *vport = hclge_get_vport(handle);
4533 struct hclge_dev *hdev = vport->back;
4535 ether_addr_copy(p, hdev->hw.mac.mac_addr);
4538 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
4541 const unsigned char *new_addr = (const unsigned char *)p;
4542 struct hclge_vport *vport = hclge_get_vport(handle);
4543 struct hclge_dev *hdev = vport->back;
4546 /* mac addr check */
4547 if (is_zero_ether_addr(new_addr) ||
4548 is_broadcast_ether_addr(new_addr) ||
4549 is_multicast_ether_addr(new_addr)) {
4550 dev_err(&hdev->pdev->dev,
4551 "Change uc mac err! invalid mac:%p.\n",
4556 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
4557 dev_warn(&hdev->pdev->dev,
4558 "remove old uc mac address fail.\n");
4560 ret = hclge_add_uc_addr(handle, new_addr);
4562 dev_err(&hdev->pdev->dev,
4563 "add uc mac address fail, ret =%d.\n",
4567 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
4568 dev_err(&hdev->pdev->dev,
4569 "restore uc mac address fail.\n");
4574 ret = hclge_pause_addr_cfg(hdev, new_addr);
4576 dev_err(&hdev->pdev->dev,
4577 "configure mac pause address fail, ret =%d.\n",
4582 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
4587 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
4590 struct hclge_vlan_filter_ctrl_cmd *req;
4591 struct hclge_desc desc;
4594 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
4596 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
4597 req->vlan_type = vlan_type;
4598 req->vlan_fe = filter_en;
4600 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4602 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
4610 #define HCLGE_FILTER_TYPE_VF 0
4611 #define HCLGE_FILTER_TYPE_PORT 1
4613 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
4615 struct hclge_vport *vport = hclge_get_vport(handle);
4616 struct hclge_dev *hdev = vport->back;
4618 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
4621 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
4622 bool is_kill, u16 vlan, u8 qos,
4625 #define HCLGE_MAX_VF_BYTES 16
4626 struct hclge_vlan_filter_vf_cfg_cmd *req0;
4627 struct hclge_vlan_filter_vf_cfg_cmd *req1;
4628 struct hclge_desc desc[2];
4633 hclge_cmd_setup_basic_desc(&desc[0],
4634 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4635 hclge_cmd_setup_basic_desc(&desc[1],
4636 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
4638 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4640 vf_byte_off = vfid / 8;
4641 vf_byte_val = 1 << (vfid % 8);
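/* Worked example of the bitmap math above: for vfid 18,
 * vf_byte_off = 18 / 8 = 2 and vf_byte_val = 1 << (18 % 8) = 0x04,
 * i.e. bit 2 of byte 2 of the vf_bitmap. Each descriptor carries
 * HCLGE_MAX_VF_BYTES (16) bitmap bytes, so vfids 0-127 land in
 * desc[0] and higher vfids spill into desc[1] below.
 */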
4643 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
4644 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
4646 req0->vlan_id = cpu_to_le16(vlan);
4647 req0->vlan_cfg = is_kill;
4649 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
4650 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
4652 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
4654 ret = hclge_cmd_send(&hdev->hw, desc, 2);
4656 dev_err(&hdev->pdev->dev,
4657 "Send vf vlan command fail, ret =%d.\n",
4663 #define HCLGE_VF_VLAN_NO_ENTRY 2
4664 if (!req0->resp_code || req0->resp_code == 1)
4667 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
4668 dev_warn(&hdev->pdev->dev,
4669 "vf vlan table is full, vf vlan filter is disabled\n");
4673 dev_err(&hdev->pdev->dev,
4674 "Add vf vlan filter fail, ret =%d.\n",
4677 if (!req0->resp_code)
4680 dev_err(&hdev->pdev->dev,
4681 "Kill vf vlan filter fail, ret =%d.\n",
4688 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
4689 u16 vlan_id, bool is_kill)
4691 struct hclge_vlan_filter_pf_cfg_cmd *req;
4692 struct hclge_desc desc;
4693 u8 vlan_offset_byte_val;
4694 u8 vlan_offset_byte;
4698 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
4700 vlan_offset_160 = vlan_id / 160;
4701 vlan_offset_byte = (vlan_id % 160) / 8;
4702 vlan_offset_byte_val = 1 << (vlan_id % 8);
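/* Worked example: for vlan_id 1000, vlan_offset_160 = 1000 / 160 = 6,
 * vlan_offset_byte = (1000 % 160) / 8 = 5, and
 * vlan_offset_byte_val = 1 << (1000 % 8) = 0x01, so the filter bit for
 * VLAN 1000 is bit 0 of byte 5 within the 7th group of 160 VLANs.
 */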
4704 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
4705 req->vlan_offset = vlan_offset_160;
4706 req->vlan_cfg = is_kill;
4707 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
4709 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4711 dev_err(&hdev->pdev->dev,
4712 "port vlan command, send fail, ret =%d.\n", ret);
4716 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
4717 u16 vport_id, u16 vlan_id, u8 qos,
4720 u16 vport_idx, vport_num = 0;
4723 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
4726 dev_err(&hdev->pdev->dev,
4727 "Set %d vport vlan filter config fail, ret =%d.\n",
4732 /* vlan 0 may be added twice when 8021q module is enabled */
4733 if (!is_kill && !vlan_id &&
4734 test_bit(vport_id, hdev->vlan_table[vlan_id]))
4737 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
4738 dev_err(&hdev->pdev->dev,
4739 "Add port vlan failed, vport %d is already in vlan %d\n",
4745 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
4746 dev_err(&hdev->pdev->dev,
4747 "Delete port vlan failed, vport %d is not in vlan %d\n",
4752 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID)
4755 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
4756 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
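/* The check above programs the shared port-level filter only at the
 * edges: when the first vport joins a VLAN (add with vport_num == 1)
 * or when the last vport leaves it (kill with vport_num == 0); in
 * between, only the per-vport table is touched.
 */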
4762 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
4763 u16 vlan_id, bool is_kill)
4765 struct hclge_vport *vport = hclge_get_vport(handle);
4766 struct hclge_dev *hdev = vport->back;
4768 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
4772 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
4773 u16 vlan, u8 qos, __be16 proto)
4775 struct hclge_vport *vport = hclge_get_vport(handle);
4776 struct hclge_dev *hdev = vport->back;
4778 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
4780 if (proto != htons(ETH_P_8021Q))
4781 return -EPROTONOSUPPORT;
4783 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
4786 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
4788 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
4789 struct hclge_vport_vtag_tx_cfg_cmd *req;
4790 struct hclge_dev *hdev = vport->back;
4791 struct hclge_desc desc;
4794 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
4796 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
4797 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
4798 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
4799 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
4800 vcfg->accept_tag1 ? 1 : 0);
4801 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
4802 vcfg->accept_untag1 ? 1 : 0);
4803 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
4804 vcfg->accept_tag2 ? 1 : 0);
4805 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
4806 vcfg->accept_untag2 ? 1 : 0);
4807 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
4808 vcfg->insert_tag1_en ? 1 : 0);
4809 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
4810 vcfg->insert_tag2_en ? 1 : 0);
4811 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
4813 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4814 req->vf_bitmap[req->vf_offset] =
4815 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
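/* As with the VF VLAN bitmap, the vport is addressed as a bit in
 * vf_bitmap[]. Assuming HCLGE_VF_NUM_PER_CMD is 64 and
 * HCLGE_VF_NUM_PER_BYTE is 8 (their values live in the companion
 * header, not shown here), vport 9 would select vf_offset 0 and set
 * bit 1 (0x02) of that bitmap byte.
 */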
4817 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4819 dev_err(&hdev->pdev->dev,
4820 "Send port txvlan cfg command fail, ret =%d\n",
4826 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
4828 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
4829 struct hclge_vport_vtag_rx_cfg_cmd *req;
4830 struct hclge_dev *hdev = vport->back;
4831 struct hclge_desc desc;
4834 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
4836 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
4837 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
4838 vcfg->strip_tag1_en ? 1 : 0);
4839 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
4840 vcfg->strip_tag2_en ? 1 : 0);
4841 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
4842 vcfg->vlan1_vlan_prionly ? 1 : 0);
4843 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
4844 vcfg->vlan2_vlan_prionly ? 1 : 0);
4846 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
4847 req->vf_bitmap[req->vf_offset] =
4848 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
4850 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4852 dev_err(&hdev->pdev->dev,
4853 "Send port rxvlan cfg command fail, ret =%d\n",
4859 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
4861 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
4862 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
4863 struct hclge_desc desc;
4866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
4867 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
4868 rx_req->ot_fst_vlan_type =
4869 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
4870 rx_req->ot_sec_vlan_type =
4871 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
4872 rx_req->in_fst_vlan_type =
4873 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
4874 rx_req->in_sec_vlan_type =
4875 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
4877 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4879 dev_err(&hdev->pdev->dev,
4880 "Send rxvlan protocol type command fail, ret =%d\n",
4885 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
4887 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
4888 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
4889 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
4891 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4893 dev_err(&hdev->pdev->dev,
4894 "Send txvlan protocol type command fail, ret =%d\n",
4900 static int hclge_init_vlan_config(struct hclge_dev *hdev)
4902 #define HCLGE_DEF_VLAN_TYPE 0x8100
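/* 0x8100 is the standard IEEE 802.1Q TPID (ETH_P_8021Q); it is used
 * below as the default for every RX/TX VLAN type register.
 */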
4904 struct hnae3_handle *handle;
4905 struct hclge_vport *vport;
4909 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
4913 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
4917 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4918 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4919 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
4920 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
4921 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
4922 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
4924 ret = hclge_set_vlan_protocol_type(hdev);
4928 for (i = 0; i < hdev->num_alloc_vport; i++) {
4929 vport = &hdev->vport[i];
4930 vport->txvlan_cfg.accept_tag1 = true;
4931 vport->txvlan_cfg.accept_untag1 = true;
4933 /* accept_tag2 and accept_untag2 are not supported on
4934 * pdev revision(0x20); newer revisions support them. Setting
4935 * these two fields does not return an error when the driver
4936 * sends the command to the firmware on revision(0x20).
4937 * These two fields cannot be configured by the user.
4939 vport->txvlan_cfg.accept_tag2 = true;
4940 vport->txvlan_cfg.accept_untag2 = true;
4942 vport->txvlan_cfg.insert_tag1_en = false;
4943 vport->txvlan_cfg.insert_tag2_en = false;
4944 vport->txvlan_cfg.default_tag1 = 0;
4945 vport->txvlan_cfg.default_tag2 = 0;
4947 ret = hclge_set_vlan_tx_offload_cfg(vport);
4951 vport->rxvlan_cfg.strip_tag1_en = false;
4952 vport->rxvlan_cfg.strip_tag2_en = true;
4953 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4954 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4956 ret = hclge_set_vlan_rx_offload_cfg(vport);
4961 handle = &hdev->vport[0].nic;
4962 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
4965 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
4967 struct hclge_vport *vport = hclge_get_vport(handle);
4969 vport->rxvlan_cfg.strip_tag1_en = false;
4970 vport->rxvlan_cfg.strip_tag2_en = enable;
4971 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
4972 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
4974 return hclge_set_vlan_rx_offload_cfg(vport);
4977 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
4979 struct hclge_config_max_frm_size_cmd *req;
4980 struct hclge_desc desc;
4984 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
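/* Example: for the common MTU of 1500, max_frm_size =
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
 */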
4986 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
4987 max_frm_size > HCLGE_MAC_MAX_FRAME)
4990 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
4992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
4994 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
4995 req->max_frm_size = cpu_to_le16(max_frm_size);
4996 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
4998 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5000 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
5004 hdev->mps = max_frm_size;
5009 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
5011 struct hclge_vport *vport = hclge_get_vport(handle);
5012 struct hclge_dev *hdev = vport->back;
5015 ret = hclge_set_mac_mtu(hdev, new_mtu);
5017 dev_err(&hdev->pdev->dev,
5018 "Change mtu fail, ret =%d\n", ret);
5022 ret = hclge_buffer_alloc(hdev);
5024 dev_err(&hdev->pdev->dev,
5025 "Allocate buffer fail, ret =%d\n", ret);
5030 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
5033 struct hclge_reset_tqp_queue_cmd *req;
5034 struct hclge_desc desc;
5037 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
5039 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5040 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5041 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
5043 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5045 dev_err(&hdev->pdev->dev,
5046 "Send tqp reset cmd error, status =%d\n", ret);
5053 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
5055 struct hclge_reset_tqp_queue_cmd *req;
5056 struct hclge_desc desc;
5059 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
5061 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
5062 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
5064 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5066 dev_err(&hdev->pdev->dev,
5067 "Get reset status error, status =%d\n", ret);
5071 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
5074 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
5077 struct hnae3_queue *queue;
5078 struct hclge_tqp *tqp;
5080 queue = handle->kinfo.tqp[queue_id];
5081 tqp = container_of(queue, struct hclge_tqp, q);
5086 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
5088 struct hclge_vport *vport = hclge_get_vport(handle);
5089 struct hclge_dev *hdev = vport->back;
5090 int reset_try_times = 0;
5095 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5098 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
5100 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
5102 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
5106 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5108 dev_warn(&hdev->pdev->dev,
5109 "Send reset tqp cmd fail, ret = %d\n", ret);
5113 reset_try_times = 0;
5114 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5115 /* Wait for tqp hw reset */
5117 reset_status = hclge_get_reset_status(hdev, queue_gid);
5122 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5123 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5127 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5129 dev_warn(&hdev->pdev->dev,
5130 "Deassert the soft reset fail, ret = %d\n", ret);
5135 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
5137 struct hclge_dev *hdev = vport->back;
5138 int reset_try_times = 0;
5143 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
5145 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
5147 dev_warn(&hdev->pdev->dev,
5148 "Send reset tqp cmd fail, ret = %d\n", ret);
5152 reset_try_times = 0;
5153 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
5154 /* Wait for tqp hw reset */
5156 reset_status = hclge_get_reset_status(hdev, queue_gid);
5161 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
5162 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
5166 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
5168 dev_warn(&hdev->pdev->dev,
5169 "Deassert the soft reset fail, ret = %d\n", ret);
5172 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
5174 struct hclge_vport *vport = hclge_get_vport(handle);
5175 struct hclge_dev *hdev = vport->back;
5177 return hdev->fw_version;
5180 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
5183 struct hclge_vport *vport = hclge_get_vport(handle);
5184 struct hclge_dev *hdev = vport->back;
5185 struct phy_device *phydev = hdev->hw.mac.phydev;
5190 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
5191 (phydev->advertising & ADVERTISED_Asym_Pause);
5194 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5196 struct phy_device *phydev = hdev->hw.mac.phydev;
5201 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
5204 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
5207 phydev->advertising ^= ADVERTISED_Asym_Pause;
5210 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
5215 hdev->fc_mode_last_time = HCLGE_FC_FULL;
5216 else if (rx_en && !tx_en)
5217 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
5218 else if (!rx_en && tx_en)
5219 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
5221 hdev->fc_mode_last_time = HCLGE_FC_NONE;
5223 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
5226 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
5228 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
5233 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
5238 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
5240 struct phy_device *phydev = hdev->hw.mac.phydev;
5241 u16 remote_advertising = 0;
5242 u16 local_advertising = 0;
5243 u32 rx_pause, tx_pause;
5246 if (!phydev->link || !phydev->autoneg)
5249 if (phydev->advertising & ADVERTISED_Pause)
5250 local_advertising = ADVERTISE_PAUSE_CAP;
5252 if (phydev->advertising & ADVERTISED_Asym_Pause)
5253 local_advertising |= ADVERTISE_PAUSE_ASYM;
5256 remote_advertising = LPA_PAUSE_CAP;
5258 if (phydev->asym_pause)
5259 remote_advertising |= LPA_PAUSE_ASYM;
5261 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
5262 remote_advertising);
5263 tx_pause = flowctl & FLOW_CTRL_TX;
5264 rx_pause = flowctl & FLOW_CTRL_RX;
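/* mii_resolve_flowctrl_fdx() applies the standard 802.3 pause
 * resolution. For example, if both ends advertise symmetric pause
 * (ADVERTISE_PAUSE_CAP on each side), flowctl comes back with both
 * FLOW_CTRL_TX and FLOW_CTRL_RX set.
 */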
5266 if (phydev->duplex == HCLGE_MAC_HALF) {
5271 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
5274 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
5275 u32 *rx_en, u32 *tx_en)
5277 struct hclge_vport *vport = hclge_get_vport(handle);
5278 struct hclge_dev *hdev = vport->back;
5280 *auto_neg = hclge_get_autoneg(handle);
5282 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5288 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
5291 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
5294 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
5303 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
5304 u32 rx_en, u32 tx_en)
5306 struct hclge_vport *vport = hclge_get_vport(handle);
5307 struct hclge_dev *hdev = vport->back;
5308 struct phy_device *phydev = hdev->hw.mac.phydev;
5311 fc_autoneg = hclge_get_autoneg(handle);
5312 if (auto_neg != fc_autoneg) {
5313 dev_info(&hdev->pdev->dev,
5314 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
5318 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
5319 dev_info(&hdev->pdev->dev,
5320 "Priority flow control enabled. Cannot set link flow control.\n");
5324 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
5327 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
5329 /* For now, flow control negotiation is only supported for
5330 * netdevs with a PHY attached.
5335 return phy_start_aneg(phydev);
5338 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
5339 u8 *auto_neg, u32 *speed, u8 *duplex)
5341 struct hclge_vport *vport = hclge_get_vport(handle);
5342 struct hclge_dev *hdev = vport->back;
5345 *speed = hdev->hw.mac.speed;
5347 *duplex = hdev->hw.mac.duplex;
5349 *auto_neg = hdev->hw.mac.autoneg;
5352 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
5354 struct hclge_vport *vport = hclge_get_vport(handle);
5355 struct hclge_dev *hdev = vport->back;
5358 *media_type = hdev->hw.mac.media_type;
5361 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
5362 u8 *tp_mdix_ctrl, u8 *tp_mdix)
5364 struct hclge_vport *vport = hclge_get_vport(handle);
5365 struct hclge_dev *hdev = vport->back;
5366 struct phy_device *phydev = hdev->hw.mac.phydev;
5367 int mdix_ctrl, mdix, retval, is_resolved;
5370 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5371 *tp_mdix = ETH_TP_MDI_INVALID;
5375 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
5377 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
5378 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
5379 HCLGE_PHY_MDIX_CTRL_S);
5381 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
5382 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
5383 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
5385 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
5387 switch (mdix_ctrl) {
5389 *tp_mdix_ctrl = ETH_TP_MDI;
5392 *tp_mdix_ctrl = ETH_TP_MDI_X;
5395 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
5398 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
5403 *tp_mdix = ETH_TP_MDI_INVALID;
5405 *tp_mdix = ETH_TP_MDI_X;
5407 *tp_mdix = ETH_TP_MDI;
5410 static int hclge_init_client_instance(struct hnae3_client *client,
5411 struct hnae3_ae_dev *ae_dev)
5413 struct hclge_dev *hdev = ae_dev->priv;
5414 struct hclge_vport *vport;
5417 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5418 vport = &hdev->vport[i];
5420 switch (client->type) {
5421 case HNAE3_CLIENT_KNIC:
5423 hdev->nic_client = client;
5424 vport->nic.client = client;
5425 ret = client->ops->init_instance(&vport->nic);
5429 if (hdev->roce_client &&
5430 hnae3_dev_roce_supported(hdev)) {
5431 struct hnae3_client *rc = hdev->roce_client;
5433 ret = hclge_init_roce_base_info(vport);
5437 ret = rc->ops->init_instance(&vport->roce);
5443 case HNAE3_CLIENT_UNIC:
5444 hdev->nic_client = client;
5445 vport->nic.client = client;
5447 ret = client->ops->init_instance(&vport->nic);
5452 case HNAE3_CLIENT_ROCE:
5453 if (hnae3_dev_roce_supported(hdev)) {
5454 hdev->roce_client = client;
5455 vport->roce.client = client;
5458 if (hdev->roce_client && hdev->nic_client) {
5459 ret = hclge_init_roce_base_info(vport);
5463 ret = client->ops->init_instance(&vport->roce);
5473 static void hclge_uninit_client_instance(struct hnae3_client *client,
5474 struct hnae3_ae_dev *ae_dev)
5476 struct hclge_dev *hdev = ae_dev->priv;
5477 struct hclge_vport *vport;
5480 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
5481 vport = &hdev->vport[i];
5482 if (hdev->roce_client) {
5483 hdev->roce_client->ops->uninit_instance(&vport->roce,
5485 hdev->roce_client = NULL;
5486 vport->roce.client = NULL;
5488 if (client->type == HNAE3_CLIENT_ROCE)
5490 if (client->ops->uninit_instance) {
5491 client->ops->uninit_instance(&vport->nic, 0);
5492 hdev->nic_client = NULL;
5493 vport->nic.client = NULL;
5498 static int hclge_pci_init(struct hclge_dev *hdev)
5500 struct pci_dev *pdev = hdev->pdev;
5501 struct hclge_hw *hw;
5504 ret = pci_enable_device(pdev);
5506 dev_err(&pdev->dev, "failed to enable PCI device\n");
5510 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5512 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5515 "can't set consistent PCI DMA");
5516 goto err_disable_device;
5518 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
5521 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
5523 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
5524 goto err_disable_device;
5527 pci_set_master(pdev);
5529 hw->io_base = pcim_iomap(pdev, 2, 0);
5531 dev_err(&pdev->dev, "Can't map configuration register space\n");
5533 goto err_clr_master;
5536 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
5540 pci_clear_master(pdev);
5541 pci_release_regions(pdev);
5543 pci_disable_device(pdev);
5548 static void hclge_pci_uninit(struct hclge_dev *hdev)
5550 struct pci_dev *pdev = hdev->pdev;
5552 pcim_iounmap(pdev, hdev->hw.io_base);
5553 pci_free_irq_vectors(pdev);
5554 pci_clear_master(pdev);
5555 pci_release_mem_regions(pdev);
5556 pci_disable_device(pdev);
5559 static void hclge_state_init(struct hclge_dev *hdev)
5561 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
5562 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5563 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
5564 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5565 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
5566 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
5569 static void hclge_state_uninit(struct hclge_dev *hdev)
5571 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5573 if (hdev->service_timer.function)
5574 del_timer_sync(&hdev->service_timer);
5575 if (hdev->service_task.func)
5576 cancel_work_sync(&hdev->service_task);
5577 if (hdev->rst_service_task.func)
5578 cancel_work_sync(&hdev->rst_service_task);
5579 if (hdev->mbx_service_task.func)
5580 cancel_work_sync(&hdev->mbx_service_task);
5583 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
5585 struct pci_dev *pdev = ae_dev->pdev;
5586 struct hclge_dev *hdev;
5589 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
5596 hdev->ae_dev = ae_dev;
5597 hdev->reset_type = HNAE3_NONE_RESET;
5598 hdev->reset_request = 0;
5599 hdev->reset_pending = 0;
5600 ae_dev->priv = hdev;
5602 ret = hclge_pci_init(hdev);
5604 dev_err(&pdev->dev, "PCI init failed\n");
5608 /* Initialize the firmware command queue */
5609 ret = hclge_cmd_queue_init(hdev);
5611 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
5612 goto err_pci_uninit;
5615 /* Initialize the firmware command interface */
5616 ret = hclge_cmd_init(hdev);
5618 goto err_cmd_uninit;
5620 ret = hclge_get_cap(hdev);
5622 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5624 goto err_cmd_uninit;
5627 ret = hclge_configure(hdev);
5629 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5630 goto err_cmd_uninit;
5633 ret = hclge_init_msi(hdev);
5635 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
5636 goto err_cmd_uninit;
5639 ret = hclge_misc_irq_init(hdev);
5642 "Misc IRQ(vector0) init error, ret = %d.\n",
5644 goto err_msi_uninit;
5647 ret = hclge_alloc_tqps(hdev);
5649 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
5650 goto err_msi_irq_uninit;
5653 ret = hclge_alloc_vport(hdev);
5655 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
5656 goto err_msi_irq_uninit;
5659 ret = hclge_map_tqp(hdev);
5661 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5662 goto err_msi_irq_uninit;
5665 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
5666 ret = hclge_mac_mdio_config(hdev);
5668 dev_err(&hdev->pdev->dev,
5669 "mdio config fail ret=%d\n", ret);
5670 goto err_msi_irq_uninit;
5674 ret = hclge_mac_init(hdev);
5676 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5677 goto err_mdiobus_unreg;
5680 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5682 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5683 goto err_mdiobus_unreg;
5686 ret = hclge_init_vlan_config(hdev);
5688 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5689 goto err_mdiobus_unreg;
5692 ret = hclge_tm_schd_init(hdev);
5694 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
5695 goto err_mdiobus_unreg;
5698 hclge_rss_init_cfg(hdev);
5699 ret = hclge_rss_init_hw(hdev);
5701 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5702 goto err_mdiobus_unreg;
5705 ret = init_mgr_tbl(hdev);
5707 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
5708 goto err_mdiobus_unreg;
5711 hclge_dcb_ops_set(hdev);
5713 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
5714 INIT_WORK(&hdev->service_task, hclge_service_task);
5715 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
5716 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
5718 hclge_clear_all_event_cause(hdev);
5720 /* Enable MISC vector(vector0) */
5721 hclge_enable_vector(&hdev->misc_vector, true);
5723 hclge_state_init(hdev);
5725 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
5729 if (hdev->hw.mac.phydev)
5730 mdiobus_unregister(hdev->hw.mac.mdio_bus);
5732 hclge_misc_irq_uninit(hdev);
5734 pci_free_irq_vectors(pdev);
5736 hclge_destroy_cmd_queue(&hdev->hw);
5738 pcim_iounmap(pdev, hdev->hw.io_base);
5739 pci_clear_master(pdev);
5740 pci_release_regions(pdev);
5741 pci_disable_device(pdev);
5746 static void hclge_stats_clear(struct hclge_dev *hdev)
5748 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
5751 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
5753 struct hclge_dev *hdev = ae_dev->priv;
5754 struct pci_dev *pdev = ae_dev->pdev;
5757 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5759 hclge_stats_clear(hdev);
5760 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
5762 ret = hclge_cmd_init(hdev);
5764 dev_err(&pdev->dev, "Cmd queue init failed\n");
5768 ret = hclge_get_cap(hdev);
5770 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
5775 ret = hclge_configure(hdev);
5777 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
5781 ret = hclge_map_tqp(hdev);
5783 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
5787 ret = hclge_mac_init(hdev);
5789 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
5793 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
5795 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
5799 ret = hclge_init_vlan_config(hdev);
5801 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
5805 ret = hclge_tm_init_hw(hdev);
5807 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
5811 ret = hclge_rss_init_hw(hdev);
5813 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
5817 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
5823 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
5825 struct hclge_dev *hdev = ae_dev->priv;
5826 struct hclge_mac *mac = &hdev->hw.mac;
5828 hclge_state_uninit(hdev);
5831 mdiobus_unregister(mac->mdio_bus);
5833 /* Disable MISC vector(vector0) */
5834 hclge_enable_vector(&hdev->misc_vector, false);
5835 synchronize_irq(hdev->misc_vector.vector_irq);
5837 hclge_destroy_cmd_queue(&hdev->hw);
5838 hclge_misc_irq_uninit(hdev);
5839 hclge_pci_uninit(hdev);
5840 ae_dev->priv = NULL;
5843 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
5845 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
5846 struct hclge_vport *vport = hclge_get_vport(handle);
5847 struct hclge_dev *hdev = vport->back;
5849 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
5852 static void hclge_get_channels(struct hnae3_handle *handle,
5853 struct ethtool_channels *ch)
5855 struct hclge_vport *vport = hclge_get_vport(handle);
5857 ch->max_combined = hclge_get_max_channels(handle);
5858 ch->other_count = 1;
5860 ch->combined_count = vport->alloc_tqps;
5863 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
5864 u16 *free_tqps, u16 *max_rss_size)
5866 struct hclge_vport *vport = hclge_get_vport(handle);
5867 struct hclge_dev *hdev = vport->back;
5871 for (i = 0; i < hdev->num_tqps; i++) {
5872 if (!hdev->htqp[i].alloced)
5875 *free_tqps = temp_tqps;
5876 *max_rss_size = hdev->rss_size_max;
5879 static void hclge_release_tqp(struct hclge_vport *vport)
5881 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5882 struct hclge_dev *hdev = vport->back;
5885 for (i = 0; i < kinfo->num_tqps; i++) {
5886 struct hclge_tqp *tqp =
5887 container_of(kinfo->tqp[i], struct hclge_tqp, q);
5889 tqp->q.handle = NULL;
5890 tqp->q.tqp_index = 0;
5891 tqp->alloced = false;
5894 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
5898 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
5900 struct hclge_vport *vport = hclge_get_vport(handle);
5901 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5902 struct hclge_dev *hdev = vport->back;
5903 int cur_rss_size = kinfo->rss_size;
5904 int cur_tqps = kinfo->num_tqps;
5905 u16 tc_offset[HCLGE_MAX_TC_NUM];
5906 u16 tc_valid[HCLGE_MAX_TC_NUM];
5907 u16 tc_size[HCLGE_MAX_TC_NUM];
5912 hclge_release_tqp(vport);
5914 ret = hclge_knic_setup(vport, new_tqps_num);
5916 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
5920 ret = hclge_map_tqp_to_vport(hdev, vport);
5922 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
5926 ret = hclge_tm_schd_init(hdev);
5928 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
5932 roundup_size = roundup_pow_of_two(kinfo->rss_size);
5933 roundup_size = ilog2(roundup_size);
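/* The per-TC RSS size is handed to hardware in log2 form. Example:
 * an rss_size of 24 rounds up to 32, and ilog2(32) = 5 is what gets
 * written into tc_size[] below.
 */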
5934 /* Set the RSS TC mode according to the new RSS size */
5935 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5938 if (!(hdev->hw_tc_map & BIT(i)))
5942 tc_size[i] = roundup_size;
5943 tc_offset[i] = kinfo->rss_size * i;
5945 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5949 /* Reinitialize the RSS indirection table according to the new RSS size */
5950 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
5954 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
5955 rss_indir[i] = i % kinfo->rss_size;
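/* e.g. with an rss_size of 16 the table becomes 0,1,...,15,0,1,...,
 * spreading the indirection entries round-robin over the new queues.
 */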
5957 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
5959 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
5965 dev_info(&hdev->pdev->dev,
5966 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
5967 cur_rss_size, kinfo->rss_size,
5968 cur_tqps, kinfo->rss_size * kinfo->num_tc);
5973 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
5974 u32 *regs_num_64_bit)
5976 struct hclge_desc desc;
5980 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
5981 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5983 dev_err(&hdev->pdev->dev,
5984 "Query register number cmd failed, ret = %d.\n", ret);
5988 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
5989 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
5991 total_num = *regs_num_32_bit + *regs_num_64_bit;
5998 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6001 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
6003 struct hclge_desc *desc;
6004 u32 *reg_val = data;
6013 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
6014 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6018 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
6019 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6021 dev_err(&hdev->pdev->dev,
6022 "Query 32 bit register cmd failed, ret = %d.\n", ret);
6027 for (i = 0; i < cmd_num; i++) {
6029 desc_data = (__le32 *)(&desc[i].data[0]);
6030 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
6032 desc_data = (__le32 *)(&desc[i]);
6033 n = HCLGE_32_BIT_REG_RTN_DATANUM;
6035 for (k = 0; k < n; k++) {
6036 *reg_val++ = le32_to_cpu(*desc_data++);
6048 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
6051 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
6053 struct hclge_desc *desc;
6054 u64 *reg_val = data;
6063 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
6064 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
6068 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
6069 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
6071 dev_err(&hdev->pdev->dev,
6072 "Query 64 bit register cmd failed, ret = %d.\n", ret);
6077 for (i = 0; i < cmd_num; i++) {
6079 desc_data = (__le64 *)(&desc[i].data[0]);
6080 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
6082 desc_data = (__le64 *)(&desc[i]);
6083 n = HCLGE_64_BIT_REG_RTN_DATANUM;
6085 for (k = 0; k < n; k++) {
6086 *reg_val++ = le64_to_cpu(*desc_data++);
6098 static int hclge_get_regs_len(struct hnae3_handle *handle)
6100 struct hclge_vport *vport = hclge_get_vport(handle);
6101 struct hclge_dev *hdev = vport->back;
6102 u32 regs_num_32_bit, regs_num_64_bit;
6105 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6107 dev_err(&hdev->pdev->dev,
6108 "Get register number failed, ret = %d.\n", ret);
6112 return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
6115 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
6118 struct hclge_vport *vport = hclge_get_vport(handle);
6119 struct hclge_dev *hdev = vport->back;
6120 u32 regs_num_32_bit, regs_num_64_bit;
6123 *version = hdev->fw_version;
6125 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
6127 dev_err(&hdev->pdev->dev,
6128 "Get register number failed, ret = %d.\n", ret);
6132 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
6134 dev_err(&hdev->pdev->dev,
6135 "Get 32 bit register failed, ret = %d.\n", ret);
6139 data = (u32 *)data + regs_num_32_bit;
6140 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
6143 dev_err(&hdev->pdev->dev,
6144 "Get 64 bit register failed, ret = %d.\n", ret);
6147 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
6149 struct hclge_set_led_state_cmd *req;
6150 struct hclge_desc desc;
6153 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
6155 req = (struct hclge_set_led_state_cmd *)desc.data;
6156 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
6157 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
6159 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6161 dev_err(&hdev->pdev->dev,
6162 "Send set led state cmd error, ret =%d\n", ret);
6167 enum hclge_led_status {
6170 HCLGE_LED_NO_CHANGE = 0xFF,
6173 static int hclge_set_led_id(struct hnae3_handle *handle,
6174 enum ethtool_phys_id_state status)
6176 struct hclge_vport *vport = hclge_get_vport(handle);
6177 struct hclge_dev *hdev = vport->back;
6180 case ETHTOOL_ID_ACTIVE:
6181 return hclge_set_led_status(hdev, HCLGE_LED_ON);
6182 case ETHTOOL_ID_INACTIVE:
6183 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
6189 static void hclge_get_link_mode(struct hnae3_handle *handle,
6190 unsigned long *supported,
6191 unsigned long *advertising)
6193 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
6194 struct hclge_vport *vport = hclge_get_vport(handle);
6195 struct hclge_dev *hdev = vport->back;
6196 unsigned int idx = 0;
6198 for (; idx < size; idx++) {
6199 supported[idx] = hdev->hw.mac.supported[idx];
6200 advertising[idx] = hdev->hw.mac.advertising[idx];
6204 static void hclge_get_port_type(struct hnae3_handle *handle,
6207 struct hclge_vport *vport = hclge_get_vport(handle);
6208 struct hclge_dev *hdev = vport->back;
6209 u8 media_type = hdev->hw.mac.media_type;
6211 switch (media_type) {
6212 case HNAE3_MEDIA_TYPE_FIBER:
6213 *port_type = PORT_FIBRE;
6215 case HNAE3_MEDIA_TYPE_COPPER:
6216 *port_type = PORT_TP;
6218 case HNAE3_MEDIA_TYPE_UNKNOWN:
6220 *port_type = PORT_OTHER;
6225 static const struct hnae3_ae_ops hclge_ops = {
6226 .init_ae_dev = hclge_init_ae_dev,
6227 .uninit_ae_dev = hclge_uninit_ae_dev,
6228 .init_client_instance = hclge_init_client_instance,
6229 .uninit_client_instance = hclge_uninit_client_instance,
6230 .map_ring_to_vector = hclge_map_ring_to_vector,
6231 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
6232 .get_vector = hclge_get_vector,
6233 .put_vector = hclge_put_vector,
6234 .set_promisc_mode = hclge_set_promisc_mode,
6235 .set_loopback = hclge_set_loopback,
6236 .start = hclge_ae_start,
6237 .stop = hclge_ae_stop,
6238 .get_status = hclge_get_status,
6239 .get_ksettings_an_result = hclge_get_ksettings_an_result,
6240 .update_speed_duplex_h = hclge_update_speed_duplex_h,
6241 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
6242 .get_media_type = hclge_get_media_type,
6243 .get_rss_key_size = hclge_get_rss_key_size,
6244 .get_rss_indir_size = hclge_get_rss_indir_size,
6245 .get_rss = hclge_get_rss,
6246 .set_rss = hclge_set_rss,
6247 .set_rss_tuple = hclge_set_rss_tuple,
6248 .get_rss_tuple = hclge_get_rss_tuple,
6249 .get_tc_size = hclge_get_tc_size,
6250 .get_mac_addr = hclge_get_mac_addr,
6251 .set_mac_addr = hclge_set_mac_addr,
6252 .add_uc_addr = hclge_add_uc_addr,
6253 .rm_uc_addr = hclge_rm_uc_addr,
6254 .add_mc_addr = hclge_add_mc_addr,
6255 .rm_mc_addr = hclge_rm_mc_addr,
6256 .update_mta_status = hclge_update_mta_status,
6257 .set_autoneg = hclge_set_autoneg,
6258 .get_autoneg = hclge_get_autoneg,
6259 .get_pauseparam = hclge_get_pauseparam,
6260 .set_pauseparam = hclge_set_pauseparam,
6261 .set_mtu = hclge_set_mtu,
6262 .reset_queue = hclge_reset_tqp,
6263 .get_stats = hclge_get_stats,
6264 .update_stats = hclge_update_stats,
6265 .get_strings = hclge_get_strings,
6266 .get_sset_count = hclge_get_sset_count,
6267 .get_fw_version = hclge_get_fw_version,
6268 .get_mdix_mode = hclge_get_mdix_mode,
6269 .enable_vlan_filter = hclge_enable_vlan_filter,
6270 .set_vlan_filter = hclge_set_vlan_filter,
6271 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
6272 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
6273 .reset_event = hclge_reset_event,
6274 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
6275 .set_channels = hclge_set_channels,
6276 .get_channels = hclge_get_channels,
6277 .get_flowctrl_adv = hclge_get_flowctrl_adv,
6278 .get_regs_len = hclge_get_regs_len,
6279 .get_regs = hclge_get_regs,
6280 .set_led_id = hclge_set_led_id,
6281 .get_link_mode = hclge_get_link_mode,
6282 .get_port_type = hclge_get_port_type,
6285 static struct hnae3_ae_algo ae_algo = {
6287 .pdev_id_table = ae_algo_pci_tbl,
6290 static int hclge_init(void)
6292 pr_info("%s is initializing\n", HCLGE_NAME);
6294 hnae3_register_ae_algo(&ae_algo);
6299 static void hclge_exit(void)
6301 hnae3_unregister_ae_algo(&ae_algo);
6303 module_init(hclge_init);
6304 module_exit(hclge_exit);
6306 MODULE_LICENSE("GPL");
6307 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
6308 MODULE_DESCRIPTION("HCLGE Driver");
6309 MODULE_VERSION(HCLGE_MOD_VERSION);