/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac    Loopback test",
	"Serdes Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

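/* Read the 64-bit IGU/EGU/SSU counters with one multi-descriptor query.
 * Only the first reply descriptor keeps its 8-byte command header;
 * continuation descriptors are packed entirely with counter words, which
 * is why the first descriptor yields one fewer 64-bit value. Counters
 * accumulate into hw_stats in table order.
 */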
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

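/* The pkt_curr_buf* fields are occupancy-style gauges rather than
 * accumulating event counters, which is presumably why they are zeroed
 * before each refresh instead of being summed across queries.
 */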
static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

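/* Refresh the 32-bit counter block. The first reply descriptor has a
 * mixed layout (one 32-bit error counter plus two packed 16-bit
 * counters), so it is unpacked by hand before the remaining descriptors
 * are consumed as plain arrays of 32-bit words.
 */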
static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

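/* Refresh the MAC statistics block. As with the other stats queries,
 * the first reply descriptor yields fewer 64-bit counter words than the
 * descriptors that follow, and all values accumulate into hw_stats.
 */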
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

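/* The ethtool dump helpers below emit all TX ring counters first and
 * then all RX ring counters, matching the order used by
 * hclge_tqps_get_strings() so values line up with their names.
 */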
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp contributes one tx and one rx counter */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return data;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (u8 *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size, p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msix =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

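/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value;
 * this is the inverse of the encoding applied in
 * hclge_cfg_mac_speed_dup() further below.
 */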
static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length must be in units of 4 bytes when sent to HW */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = 1;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently, discontiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
	else
		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MAX_M,
		       HCLGE_TSO_MSS_MAX_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is programmed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

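/* Check whether rx_all can cover the private buffers plus a shared
 * buffer sized as the larger of a static minimum (2 * mps + DV) and a
 * per-TC demand (one mps per PFC-enabled TC, half an mps per remaining
 * TC, plus one mps). On success the shared-buffer size and its per-TC
 * thresholds are filled in as a side effect.
 */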
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
			    HCLGE_RX_PRIV_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msix;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msix(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret, i;

	hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!hdev->msix_entries)
		return -ENOMEM;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status)
		return -ENOMEM;

	for (i = 0; i < hdev->num_msi; i++) {
		hdev->msix_entries[i].entry = i;
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
	}

	hdev->num_msi_left = hdev->num_msi;
	hdev->base_msi_vector = hdev->pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
				    hdev->num_msi, hdev->num_msi);
	if (ret < 0) {
		dev_info(&hdev->pdev->dev,
			 "MSI-X vector alloc failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status)
		return -ENOMEM;

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
	if (vectors < 0) {
		dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
		return -EINVAL;
	}
	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				 HCLGE_ROCE_VECTOR_OFFSET;

	return 0;
}

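/* Only 10M/100M links can run half duplex; every faster MAC mode is
 * forced to full duplex regardless of the requested setting.
 */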
static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M))
		mac->duplex = (u8)duplex;
	else
		mac->duplex = HCLGE_MAC_FULL;

	mac->speed = speed;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
			       HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		     1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	hclge_check_speed_dup(hdev, duplex, speed);

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				   HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);
		return -EIO;
	}

	return 0;
}

static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query cmd failed %d.\n", ret);
		return ret;
	}

	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);

	return 0;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_query_autoneg_result(hdev);

	return hdev->hw.mac.autoneg;
}

static int hclge_mac_init(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;
	int ret;

	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mac speed dup fail ret=%d\n", ret);
		return ret;
	}

	mac->link = 0;

	ret = hclge_mac_mdio_config(hdev);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "mdio config fail ret=%d\n", ret);
		return ret;
	}

	/* Initialize the MTA table work mode */
	hdev->accept_mta_mc = true;
	hdev->enable_mta = true;
	hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36;

	ret = hclge_set_mta_filter_mode(hdev,
					hdev->mta_mac_sel_type,
					hdev->enable_mta);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n",
			ret);
		return ret;
	}

	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

2263 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2265 struct hclge_link_status_cmd *req;
2266 struct hclge_desc desc;
2270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2273 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2278 req = (struct hclge_link_status_cmd *)desc.data;
2279 link_status = req->status & HCLGE_LINK_STATUS;
2281 return !!link_status;
static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as autoneg's result from mac cmd when phy
	 * doesn't exist.
	 */
	if (mac.phydev)
		return 0;

	/* update mac->autoneg. */
	ret = hclge_query_autoneg_result(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"autoneg result query failed %d\n", ret);
		return ret;
	}

	if (!hdev->hw.mac.autoneg)
		return 0;

	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac autoneg/speed/duplex query failed %d\n", ret);
		return ret;
	}

	if ((mac.speed != speed) || (mac.duplex != duplex)) {
		ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac speed/duplex config failed %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_update_speed_duplex(hdev);
}

static int hclge_get_status(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_link_status(hdev);

	return hdev->hw.mac.link;
}
static void hclge_service_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + HZ);
	hclge_task_schedule(hdev);
}

static void hclge_service_complete(struct hclge_dev *hdev)
{
	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));

	/* Flush memory before next watchdog */
	smp_mb__before_atomic();
	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task);

	hclge_update_speed_duplex(hdev);
	hclge_update_link_status(hdev);
	hclge_update_stats_for_all(hdev);
	hclge_service_complete(hdev);
}

static void hclge_disable_sriov(struct hclge_dev *hdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(hdev->pdev)) {
		dev_warn(&hdev->pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(hdev->pdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}
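/* Vector 0 is reserved for the misc/mailbox interrupt, so the search for
 * free data-path vectors starts at index 1. io_addr points at the
 * vector's interrupt control register: HCLGE_VECTOR_REG_BASE plus a
 * per-vector stride, offset per owning vport (the
 * vport_id * HCLGE_VECTOR_VF_OFFSET term is an assumption inferred from
 * the VF offset constant).
 */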
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGE_VECTOR_REG_BASE +
					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
					vport->vport_id *
					HCLGE_VECTOR_VF_OFFSET;
				hdev->vector_status[i] = vport->vport_id;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}
static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++) {
		if (hdev->msix_entries) {
			if (vector == hdev->msix_entries[i].vector)
				return i;
		} else {
			if (vector == (hdev->base_msi_vector + i))
				return i;
		}
	}
	return -EINVAL;
}

static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_KEY_SIZE;
}

static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGE_RSS_IND_TBL_SIZE;
}

static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int rss_hash_algo;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get rss algo config error, status = %d\n", ret);
		return ret;
	}

	req = (struct hclge_rss_config_cmd *)desc.data;
	rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);

	if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
		return ETH_RSS_HASH_TOP;

	return -EINVAL;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	struct hclge_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclge_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGE_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
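/* The hash key is pushed in three chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
 * with the last chunk carrying the remainder. E.g. assuming a 40-byte
 * Toeplitz key and HCLGE_RSS_HASH_KEY_NUM == 16, the chunks are
 * 16 + 16 + 8 bytes at key offsets 0, 1 and 2.
 */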
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc
			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail,status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
			       HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
			       HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
	req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
	req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i;

	/* Get hash algorithm */
	if (hfunc)
		*hfunc = hclge_get_rss_algo(hdev);

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirect table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

		if (hfunc == ETH_RSS_HASH_TOP ||
		    hfunc == ETH_RSS_HASH_NO_CHANGE)
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
		else
			return -EINVAL;
		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	ret = hclge_set_rss_indir_table(hdev, indir);
	return ret;
}

static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}
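/* Translation from ethtool RXH_* flags to the hardware tuple bits, e.g.
 * "ethtool -N <if> rx-flow-hash tcp4 sdfn" requests
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which maps to
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT.
 * SCTP flows additionally hash on the verification tag (HCLGE_V_TAG_BIT).
 */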
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	hclge_cmd_reuse_desc(&desc, false);

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);

	return ret;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Read rss tuple fail, status = %d\n", ret);
		return ret;
	}

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = req->ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = req->ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = req->ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = req->ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = req->ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = req->ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EFAULT;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}
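/* tc_size programmed below is the log2 of the per-TC queue region after
 * rounding up to a power of two. E.g. rss_size 16 gives tc_size
 * ilog2(16) = 4; rss_size 10 rounds up to 16 and also gives 4, with the
 * indirection table still limiting the spread to the 10 allocated
 * queues. Each TC i starts at queue offset rss_size * i.
 */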
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u8 rss_key[HCLGE_RSS_KEY_SIZE];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u32 *rss_indir = NULL;
	u16 rss_size = 0, roundup_size;
	const u8 *key;
	int i, ret, j;

	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	/* Get default RSS key */
	netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);

	/* Initialize RSS indirect table for each vport */
	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;

			/* vport 0 is for PF */
			if (j != 0)
				continue;

			rss_size = vport[j].alloc_rss_size;
			rss_indir[i] = vport[j].rss_indirection_tbl[i];
		}
	}
	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		goto err;

	key = rss_key;
	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		goto err;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		goto err;

	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
			rss_size);
		ret = -EINVAL;
		goto err;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

err:
	kfree(rss_indir);

	return ret;
}
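/* Ring-to-vector mappings are written HCLGE_VECTOR_ELEMENTS_PER_CMD
 * entries per command descriptor; longer ring chains flush the current
 * descriptor and reinitialize it until the whole chain has been mapped.
 */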
int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
				   struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_ADD_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
static int hclge_map_handle_ring_to_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
}

static int hclge_unmap_ring_from_vector(
		struct hnae3_handle *handle, int vector,
		struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_ctrl_vector_chain_cmd *req;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	int i, vector_id;
	int ret;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);

	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		u16 type_and_id = 0;

		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			       node->tqp_index);
		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
			       HCLGE_INT_GL_IDX_S,
			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));

		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
		req->vfid = vport->vport_id;

		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;

			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"Unmap TQP fail, status is %d.\n",
					ret);
				return ret;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   HCLGE_OPC_DEL_RING_TO_VECTOR,
						   false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Unmap TQP fail, status is %d.\n", ret);
			return ret;
		}
	}

	return 0;
}
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
			       struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;
	req->flag = (param->enable << HCLGE_PROMISC_EN_B);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", ret);
		return ret;
	}
	return 0;
}

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
	hclge_cmd_set_promisc_mode(hdev, &param);
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	struct hclge_config_mac_mode_cmd *req =
		(struct hclge_config_mac_mode_cmd *)desc.data;
	u32 loop_en = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
	hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
	hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret =%d.\n", ret);
}
static int hclge_set_loopback(struct hnae3_handle *handle,
			      enum hnae3_loop loop_mode, bool en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 loop_en;
	int ret = 0;

	switch (loop_mode) {
	case HNAE3_MAC_INTER_LOOP_MAC:
		req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
		/* 1 Read out the MAC mode config at first */
		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_CONFIG_MAC_MODE,
					   true);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"mac loopback get fail, ret =%d.\n",
				ret);
			return ret;
		}

		/* 2 Then setup the loopback flag */
		loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
		if (en)
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
		else
			hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);

		req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);

		/* 3 Config mac work mode with loopback flag
		 * and its original configure parameters
		 */
		hclge_cmd_reuse_desc(&desc, false);
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"mac loopback set fail, ret =%d.\n", ret);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(&hdev->pdev->dev,
			"loop_mode %d is not supported\n", loop_mode);
		break;
	}

	return ret;
}

static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
			    int stream_id, bool enable)
{
	struct hclge_desc desc;
	struct hclge_cfg_com_tqp_queue_cmd *req =
		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGE_TQP_ENABLE_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Tqp enable fail, status =%d.\n", ret);
	return ret;
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_queue *queue;
	struct hclge_tqp *tqp;
	int i;

	for (i = 0; i < vport->alloc_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static int hclge_ae_start(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id, ret;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* todo clear interrupt */
		/* ring enable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, true);
	}

	/* mac enable */
	hclge_cfg_mac_mode(hdev, true);
	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
	mod_timer(&hdev->service_timer, jiffies + HZ);

	ret = hclge_mac_start_phy(hdev);
	if (ret)
		return ret;

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);

	return 0;
}

static void hclge_ae_stop(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int i, queue_id;

	for (i = 0; i < vport->alloc_tqps; i++) {
		/* Ring disable */
		queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]);
		if (queue_id < 0) {
			dev_warn(&hdev->pdev->dev,
				 "Get invalid queue id, ignore it\n");
			continue;
		}

		hclge_tqp_enable(hdev, queue_id, 0, false);
	}

	/* Mac disable */
	hclge_cfg_mac_mode(hdev, false);

	hclge_mac_stop_phy(hdev);

	/* reset tqp stats */
	hclge_reset_tqp_stats(handle);
}
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
					 u16 cmdq_resp, u8 resp_code,
					 enum hclge_mac_vlan_tbl_opcode op)
{
	struct hclge_dev *hdev = vport->back;
	int return_status = -EIO;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
			cmdq_resp);
		return -EIO;
	}

	if (op == HCLGE_MAC_VLAN_ADD) {
		if ((!resp_code) || (resp_code == 1)) {
			return_status = 0;
		} else if (resp_code == 2) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for uc_overflow.\n");
		} else if (resp_code == 3) {
			return_status = -EIO;
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for mc_overflow.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"add mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"remove mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"remove mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else if (op == HCLGE_MAC_VLAN_LKUP) {
		if (!resp_code) {
			return_status = 0;
		} else if (resp_code == 1) {
			return_status = -EIO;
			dev_dbg(&hdev->pdev->dev,
				"lookup mac addr failed for miss.\n");
		} else {
			dev_err(&hdev->pdev->dev,
				"lookup mac addr failed for undefined, code=%d.\n",
				resp_code);
		}
	} else {
		return_status = -EIO;
		dev_err(&hdev->pdev->dev,
			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
			op);
	}

	return return_status;
}
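/* A MAC-VLAN table entry carries a 256-bit bitmap of the functions
 * (vfids) that should receive traffic matching the entry, split across
 * the extra command descriptors: desc[1] holds vfids 0-191 and desc[2]
 * holds vfids 192-255. E.g. vfid 5 sets bit 5 of desc[1].data[0], while
 * vfid 200 sets bit 8 of desc[2].data[0].
 */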
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
{
	int word_num;
	int bit_num;

	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid >= 0 && vfid <= 191) {
		word_num = vfid / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
	} else {
		word_num = (vfid - 192) / 32;
		bit_num = vfid % 32;
		if (clr)
			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
		else
			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
	}

	return 0;
}

static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
{
#define HCLGE_DESC_NUMBER 3
#define HCLGE_FUNC_NUMBER_PER_DESC 6
	int i, j;

	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
			if (desc[i].data[j])
				return false;

	return true;
}

static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
				   const u8 *addr)
{
	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
		       (mac_addr[0]) | (mac_addr[1] << 8);
	u32 low_val = mac_addr[4] | (mac_addr[5] << 8);

	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
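/* Byte-packing worked example: for MAC 00:01:02:03:04:05,
 * high_val = 0x02 << 16 | 0x03 << 24 | 0x00 | 0x01 << 8 = 0x03020100 and
 * low_val = 0x04 | 0x05 << 8 = 0x0504, i.e. the first four address bytes
 * land in mac_addr_hi32 in little-endian byte order.
 */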
static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
					   const u8 *addr)
{
	u16 high_val = addr[1] | (addr[0] << 8);
	struct hclge_dev *hdev = vport->back;
	u32 rsh = 4 - hdev->mta_mac_sel_type;
	u16 ret_val = (high_val >> rsh) & 0xfff;

	return ret_val;
}
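/* The MTA index is 12 bits taken from the top of the destination MAC.
 * high_val holds address bits 47..32 and the selector picks the window;
 * e.g. assuming HCLGE_MAC_ADDR_47_36 is 0, rsh is 4 and the index is
 * bits 47..36 of the address.
 */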
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable)
{
	struct hclge_mta_filter_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_mta_filter_mode_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);

	hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
		     enable);
	hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
		       HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta filter mode failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
			      u8 func_id,
			      bool enable)
{
	struct hclge_cfg_func_mta_filter_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);

	hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
		     enable);
	req->function_id = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config func_id enable failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_set_mta_table_item(struct hclge_vport *vport,
				    u16 idx,
				    bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_cfg_func_mta_item_cmd *req;
	struct hclge_desc desc;
	u16 item_idx = 0;
	int ret;

	req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
	hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);

	hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
		       HCLGE_CFG_MTA_ITEM_IDX_S, idx);
	req->item_idx = cpu_to_le16(item_idx);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Config mta table item failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);

	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"del mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_REMOVE);
}

static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
				     struct hclge_mac_vlan_tbl_entry_cmd *req,
				     struct hclge_desc *desc,
				     bool is_mc)
{
	struct hclge_dev *hdev = vport->back;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
	if (is_mc) {
		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		hclge_cmd_setup_basic_desc(&desc[1],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_setup_basic_desc(&desc[2],
					   HCLGE_OPC_MAC_VLAN_ADD,
					   true);
		ret = hclge_cmd_send(&hdev->hw, desc, 3);
	} else {
		memcpy(desc[0].data,
		       req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
	}
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"lookup mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}
	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc[0].retval);

	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
					     HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
				  struct hclge_mac_vlan_tbl_entry_cmd *req,
				  struct hclge_desc *mc_desc)
{
	struct hclge_dev *hdev = vport->back;
	int cfg_status;
	u8 resp_code;
	u16 retval;
	int ret;

	if (!mc_desc) {
		struct hclge_desc desc;

		hclge_cmd_setup_basic_desc(&desc,
					   HCLGE_OPC_MAC_VLAN_ADD,
					   false);
		memcpy(desc.data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(desc.retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	} else {
		hclge_cmd_reuse_desc(&mc_desc[0], false);
		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[1], false);
		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		hclge_cmd_reuse_desc(&mc_desc[2], false);
		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
		memcpy(mc_desc[0].data, req,
		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
		retval = le16_to_cpu(mc_desc[0].retval);

		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
							   resp_code,
							   HCLGE_MAC_VLAN_ADD);
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac addr failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	return cfg_status;
}
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_uc_addr_common(vport, addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	u16 egress_port = 0;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
			 addr,
			 is_zero_ether_addr(addr),
			 is_broadcast_ether_addr(addr),
			 is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);

	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
	hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
		       HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
	hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
		       HCLGE_MAC_EPORT_PFID_S, 0);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr);

	status = hclge_add_mac_vlan_tbl(vport, &req, NULL);

	return status;
}
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_uc_addr_common(vport, addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_remove_mac_vlan_tbl(vport, &req);

	return status;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_add_mc_addr_common(vport, addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	u16 tbl_idx;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, update VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
		hclge_update_desc_vfid(desc, vport->vport_id, false);
		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, true);

	return status;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_rm_mc_addr_common(vport, addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_cmd_status status;
	struct hclge_desc desc[3];
	u16 tbl_idx;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%pM.\n",
			 addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
	hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		hclge_update_desc_vfid(desc, vport->vport_id, true);

		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so need to delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
		else
			/* Not all the vfids are zero, update the vfid */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	} else {
		/* This mac addr does not exist, can't delete it */
		dev_err(&hdev->pdev->dev,
			"Rm multicast mac addr failed, ret = %d.\n",
			status);
		return -EIO;
	}

	/* Set MTA table for this MAC address */
	tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr);
	status = hclge_set_mta_table_item(vport, tbl_idx, false);

	return status;
}
static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			 new_addr);
		return -EINVAL;
	}

	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);

	if (!hclge_add_uc_addr(handle, new_addr)) {
		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
		return 0;
	}

	return -EIO;
}

static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      bool filter_en)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);

	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vlan_fe = filter_en;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES  16
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	struct hclge_desc desc[2];
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	if (!is_kill) {
		if (!req0->resp_code || req0->resp_code == 1)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	} else {
		if (!req0->resp_code)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%d.\n",
			req0->resp_code);
	}

	return -EIO;
}
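/* Each VF VLAN command pair carries a VF bitmap split across two
 * descriptors of HCLGE_MAX_VF_BYTES each. E.g. vfid 10 gives
 * vf_byte_off 1 and vf_byte_val 1 << 2, i.e. bit 2 of byte 1 in the
 * first descriptor's bitmap.
 */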
static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
				      __be16 proto, u16 vlan_id,
				      bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / 160;
	vlan_offset_byte = (vlan_id % 160) / 8;
	vlan_offset_byte_val = 1 << (vlan_id % 8);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n",
			ret);
		return ret;
	}

	ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret =%d.\n",
			ret);
		return -EIO;
	}

	return 0;
}
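/* The 4096-entry port VLAN table is addressed as 160-VLAN windows:
 * e.g. vlan_id 1000 selects window 1000 / 160 = 6, byte
 * (1000 % 160) / 8 = 5 within that window's bitmap, and bit
 * 1000 % 8 = 0 inside that byte.
 */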
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE   0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
	struct hnae3_handle *handle;
	int ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
					 true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
					 true);
	if (ret)
		return ret;

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
		return -EINVAL;

	hdev->mps = new_mtu;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mtu);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
		return ret;
	}

	return 0;
}
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
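/* TQP reset sequence: disable the queue, assert the per-queue soft
 * reset, poll ready_to_reset with a short sleep between attempts (up to
 * HCLGE_TQP_RESET_TRY_TIMES tries), then deassert the reset again.
 */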
static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	reset_try_times = 0;
	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}
static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:

			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client && hdev->nic_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client) {
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
			hdev->roce_client = NULL;
			vport->roce.client = NULL;
		}
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance) {
			client->ops->uninit_instance(&vport->nic, 0);
			hdev->nic_client = NULL;
			vport->nic.client = NULL;
		}
	}
}
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
		pci_disable_msix(pdev);
		devm_kfree(&pdev->dev, hdev->msix_entries);
		hdev->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto err_hclge_dev;
	}

	hdev->flag |= HCLGE_FLAG_USE_MSIX;
	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Command queue initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	if (hdev->flag & HCLGE_FLAG_USE_MSIX)
		ret = hclge_init_msix(hdev);
	else
		ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
	INIT_WORK(&hdev->service_task, hclge_service_task);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
err_hclge_dev:
	return ret;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);