1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
56 #define HCLGE_LINK_STATUS_MS 10
58 #define HCLGE_VF_VPORT_START_NUM 1
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
75 static struct hnae3_ae_algo ae_algo;
77 static struct workqueue_struct *hclge_wq;
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 /* required last entry */
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 HCLGE_CMDQ_TX_ADDR_H_REG,
96 HCLGE_CMDQ_TX_DEPTH_REG,
97 HCLGE_CMDQ_TX_TAIL_REG,
98 HCLGE_CMDQ_TX_HEAD_REG,
99 HCLGE_CMDQ_RX_ADDR_L_REG,
100 HCLGE_CMDQ_RX_ADDR_H_REG,
101 HCLGE_CMDQ_RX_DEPTH_REG,
102 HCLGE_CMDQ_RX_TAIL_REG,
103 HCLGE_CMDQ_RX_HEAD_REG,
104 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 HCLGE_CMDQ_INTR_STS_REG,
106 HCLGE_CMDQ_INTR_EN_REG,
107 HCLGE_CMDQ_INTR_GEN_REG};
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 HCLGE_VECTOR0_OTER_EN_REG,
111 HCLGE_MISC_RESET_STS_REG,
112 HCLGE_MISC_VECTOR_INT_STS,
113 HCLGE_GLOBAL_RESET_REG,
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 HCLGE_RING_RX_ADDR_H_REG,
119 HCLGE_RING_RX_BD_NUM_REG,
120 HCLGE_RING_RX_BD_LENGTH_REG,
121 HCLGE_RING_RX_MERGE_EN_REG,
122 HCLGE_RING_RX_TAIL_REG,
123 HCLGE_RING_RX_HEAD_REG,
124 HCLGE_RING_RX_FBD_NUM_REG,
125 HCLGE_RING_RX_OFFSET_REG,
126 HCLGE_RING_RX_FBD_OFFSET_REG,
127 HCLGE_RING_RX_STASH_REG,
128 HCLGE_RING_RX_BD_ERR_REG,
129 HCLGE_RING_TX_ADDR_L_REG,
130 HCLGE_RING_TX_ADDR_H_REG,
131 HCLGE_RING_TX_BD_NUM_REG,
132 HCLGE_RING_TX_PRIORITY_REG,
133 HCLGE_RING_TX_TC_REG,
134 HCLGE_RING_TX_MERGE_EN_REG,
135 HCLGE_RING_TX_TAIL_REG,
136 HCLGE_RING_TX_HEAD_REG,
137 HCLGE_RING_TX_FBD_NUM_REG,
138 HCLGE_RING_TX_OFFSET_REG,
139 HCLGE_RING_TX_EBD_NUM_REG,
140 HCLGE_RING_TX_EBD_OFFSET_REG,
141 HCLGE_RING_TX_BD_ERR_REG,
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 HCLGE_TQP_INTR_GL0_REG,
146 HCLGE_TQP_INTR_GL1_REG,
147 HCLGE_TQP_INTR_GL2_REG,
148 HCLGE_TQP_INTR_RL_REG};
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 "Serdes serial Loopback test",
153 "Serdes parallel Loopback test",
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num",
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num",
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_control_pkt_num",
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 {"mac_rx_control_pkt_num",
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 {"mac_tx_pfc_pkt_num",
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 {"mac_tx_pfc_pri0_pkt_num",
169 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 {"mac_tx_pfc_pri1_pkt_num",
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 {"mac_tx_pfc_pri2_pkt_num",
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 {"mac_tx_pfc_pri3_pkt_num",
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 {"mac_tx_pfc_pri4_pkt_num",
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 {"mac_tx_pfc_pri5_pkt_num",
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 {"mac_tx_pfc_pri6_pkt_num",
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 {"mac_tx_pfc_pri7_pkt_num",
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 {"mac_rx_pfc_pkt_num",
185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 {"mac_rx_pfc_pri0_pkt_num",
187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 {"mac_rx_pfc_pri1_pkt_num",
189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 {"mac_rx_pfc_pri2_pkt_num",
191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 {"mac_rx_pfc_pri3_pkt_num",
193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 {"mac_rx_pfc_pri4_pkt_num",
195 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 {"mac_rx_pfc_pri5_pkt_num",
197 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 {"mac_rx_pfc_pri6_pkt_num",
199 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 {"mac_rx_pfc_pri7_pkt_num",
201 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 {"mac_tx_total_pkt_num",
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 {"mac_tx_total_oct_num",
205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 {"mac_tx_good_pkt_num",
207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 {"mac_tx_bad_pkt_num",
209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 {"mac_tx_good_oct_num",
211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 {"mac_tx_bad_oct_num",
213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 {"mac_tx_uni_pkt_num",
215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 {"mac_tx_multi_pkt_num",
217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 {"mac_tx_broad_pkt_num",
219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 {"mac_tx_undersize_pkt_num",
221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 {"mac_tx_oversize_pkt_num",
223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 {"mac_tx_64_oct_pkt_num",
225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 {"mac_tx_65_127_oct_pkt_num",
227 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 {"mac_tx_128_255_oct_pkt_num",
229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 {"mac_tx_256_511_oct_pkt_num",
231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 {"mac_tx_512_1023_oct_pkt_num",
233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 {"mac_tx_1024_1518_oct_pkt_num",
235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 {"mac_tx_1519_2047_oct_pkt_num",
237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 {"mac_tx_2048_4095_oct_pkt_num",
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 {"mac_tx_4096_8191_oct_pkt_num",
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 {"mac_tx_8192_9216_oct_pkt_num",
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 {"mac_tx_9217_12287_oct_pkt_num",
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 {"mac_tx_12288_16383_oct_pkt_num",
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 {"mac_tx_1519_max_good_pkt_num",
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 {"mac_tx_1519_max_bad_pkt_num",
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 {"mac_rx_total_pkt_num",
253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 {"mac_rx_total_oct_num",
255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 {"mac_rx_good_pkt_num",
257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 {"mac_rx_bad_pkt_num",
259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 {"mac_rx_good_oct_num",
261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 {"mac_rx_bad_oct_num",
263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 {"mac_rx_uni_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 {"mac_rx_multi_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 {"mac_rx_broad_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 {"mac_rx_undersize_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 {"mac_rx_oversize_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 {"mac_rx_64_oct_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 {"mac_rx_65_127_oct_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 {"mac_rx_128_255_oct_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 {"mac_rx_256_511_oct_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 {"mac_rx_512_1023_oct_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 {"mac_rx_1024_1518_oct_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 {"mac_rx_1519_2047_oct_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 {"mac_rx_2048_4095_oct_pkt_num",
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 {"mac_rx_4096_8191_oct_pkt_num",
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 {"mac_rx_8192_9216_oct_pkt_num",
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 {"mac_rx_9217_12287_oct_pkt_num",
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 {"mac_rx_12288_16383_oct_pkt_num",
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 {"mac_rx_1519_max_good_pkt_num",
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 {"mac_rx_1519_max_bad_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 {"mac_tx_fragment_pkt_num",
304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 {"mac_tx_undermin_pkt_num",
306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 {"mac_tx_jabber_pkt_num",
308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 {"mac_tx_err_all_pkt_num",
310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 {"mac_tx_from_app_good_pkt_num",
312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 {"mac_tx_from_app_bad_pkt_num",
314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 {"mac_rx_fragment_pkt_num",
316 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 {"mac_rx_undermin_pkt_num",
318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 {"mac_rx_jabber_pkt_num",
320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 {"mac_rx_fcs_err_pkt_num",
322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 {"mac_rx_send_app_good_pkt_num",
324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 {"mac_rx_send_app_bad_pkt_num",
326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 .i_port_bitmap = 0x1,
338 static const u8 hclge_hash_key[] = {
339 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 HCLGE_DFX_BIOS_BD_OFFSET,
348 HCLGE_DFX_SSU_0_BD_OFFSET,
349 HCLGE_DFX_SSU_1_BD_OFFSET,
350 HCLGE_DFX_IGU_BD_OFFSET,
351 HCLGE_DFX_RPU_0_BD_OFFSET,
352 HCLGE_DFX_RPU_1_BD_OFFSET,
353 HCLGE_DFX_NCSI_BD_OFFSET,
354 HCLGE_DFX_RTC_BD_OFFSET,
355 HCLGE_DFX_PPP_BD_OFFSET,
356 HCLGE_DFX_RCB_BD_OFFSET,
357 HCLGE_DFX_TQP_BD_OFFSET,
358 HCLGE_DFX_SSU_2_BD_OFFSET
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 HCLGE_OPC_DFX_SSU_REG_0,
364 HCLGE_OPC_DFX_SSU_REG_1,
365 HCLGE_OPC_DFX_IGU_EGU_REG,
366 HCLGE_OPC_DFX_RPU_REG_0,
367 HCLGE_OPC_DFX_RPU_REG_1,
368 HCLGE_OPC_DFX_NCSI_REG,
369 HCLGE_OPC_DFX_RTC_REG,
370 HCLGE_OPC_DFX_PPP_REG,
371 HCLGE_OPC_DFX_RCB_REG,
372 HCLGE_OPC_DFX_TQP_REG,
373 HCLGE_OPC_DFX_SSU_REG_2
376 static const struct key_info meta_data_key_info[] = {
377 { PACKET_TYPE_ID, 6},
387 static const struct key_info tuple_key_info[] = {
388 { OUTER_DST_MAC, 48},
389 { OUTER_SRC_MAC, 48},
390 { OUTER_VLAN_TAG_FST, 16},
391 { OUTER_VLAN_TAG_SEC, 16},
392 { OUTER_ETH_TYPE, 16},
395 { OUTER_IP_PROTO, 8},
399 { OUTER_SRC_PORT, 16},
400 { OUTER_DST_PORT, 16},
402 { OUTER_TUN_VNI, 24},
403 { OUTER_TUN_FLOW_ID, 8},
404 { INNER_DST_MAC, 48},
405 { INNER_SRC_MAC, 48},
406 { INNER_VLAN_TAG_FST, 16},
407 { INNER_VLAN_TAG_SEC, 16},
408 { INNER_ETH_TYPE, 16},
411 { INNER_IP_PROTO, 8},
415 { INNER_SRC_PORT, 16},
416 { INNER_DST_PORT, 16},
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
422 #define HCLGE_MAC_CMD_NUM 21
424 u64 *data = (u64 *)(&hdev->mac_stats);
425 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
430 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
433 dev_err(&hdev->pdev->dev,
434 "Get MAC pkt stats fail, status = %d.\n", ret);
439 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 /* for special opcode 0032, only the first desc has the head */
441 if (unlikely(i == 0)) {
442 desc_data = (__le64 *)(&desc[i].data[0]);
443 n = HCLGE_RD_FIRST_STATS_NUM;
445 desc_data = (__le64 *)(&desc[i]);
446 n = HCLGE_RD_OTHER_STATS_NUM;
449 for (k = 0; k < n; k++) {
450 *data += le64_to_cpu(*desc_data);
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
461 u64 *data = (u64 *)(&hdev->mac_stats);
462 struct hclge_desc *desc;
467 /* This may be called inside atomic sections,
468 * so GFP_ATOMIC is more suitalbe here
470 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
474 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
481 for (i = 0; i < desc_num; i++) {
482 /* for special opcode 0034, only the first desc has the head */
484 desc_data = (__le64 *)(&desc[i].data[0]);
485 n = HCLGE_RD_FIRST_STATS_NUM;
487 desc_data = (__le64 *)(&desc[i]);
488 n = HCLGE_RD_OTHER_STATS_NUM;
491 for (k = 0; k < n; k++) {
492 *data += le64_to_cpu(*desc_data);
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
505 struct hclge_desc desc;
510 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
515 desc_data = (__le32 *)(&desc.data[0]);
516 reg_num = le32_to_cpu(*desc_data);
518 *desc_num = 1 + ((reg_num - 3) >> 2) +
519 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
529 ret = hclge_mac_query_reg_num(hdev, &desc_num);
531 /* The firmware supports the new statistics acquisition method */
533 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 else if (ret == -EOPNOTSUPP)
535 ret = hclge_mac_update_stats_defective(hdev);
537 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
544 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 struct hclge_vport *vport = hclge_get_vport(handle);
546 struct hclge_dev *hdev = vport->back;
547 struct hnae3_queue *queue;
548 struct hclge_desc desc[1];
549 struct hclge_tqp *tqp;
552 for (i = 0; i < kinfo->num_tqps; i++) {
553 queue = handle->kinfo.tqp[i];
554 tqp = container_of(queue, struct hclge_tqp, q);
555 /* command : HCLGE_OPC_QUERY_IGU_STAT */
556 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
559 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
560 ret = hclge_cmd_send(&hdev->hw, desc, 1);
562 dev_err(&hdev->pdev->dev,
563 "Query tqp stat fail, status = %d,queue = %d\n",
567 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 le32_to_cpu(desc[0].data[1]);
571 for (i = 0; i < kinfo->num_tqps; i++) {
572 queue = handle->kinfo.tqp[i];
573 tqp = container_of(queue, struct hclge_tqp, q);
574 /* command : HCLGE_OPC_QUERY_IGU_STAT */
575 hclge_cmd_setup_basic_desc(&desc[0],
576 HCLGE_OPC_QUERY_TX_STATS,
579 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
580 ret = hclge_cmd_send(&hdev->hw, desc, 1);
582 dev_err(&hdev->pdev->dev,
583 "Query tqp stat fail, status = %d,queue = %d\n",
587 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 le32_to_cpu(desc[0].data[1]);
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
596 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 struct hclge_tqp *tqp;
601 for (i = 0; i < kinfo->num_tqps; i++) {
602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
606 for (i = 0; i < kinfo->num_tqps; i++) {
607 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
616 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
618 /* each tqp has TX & RX two queues */
619 return kinfo->num_tqps * (2);
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
624 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
628 for (i = 0; i < kinfo->num_tqps; i++) {
629 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 struct hclge_tqp, q);
631 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
633 buff = buff + ETH_GSTRING_LEN;
636 for (i = 0; i < kinfo->num_tqps; i++) {
637 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 struct hclge_tqp, q);
639 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
641 buff = buff + ETH_GSTRING_LEN;
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 const struct hclge_comm_stats_str strs[],
654 for (i = 0; i < size; i++)
655 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 const struct hclge_comm_stats_str strs[],
664 char *buff = (char *)data;
667 if (stringset != ETH_SS_STATS)
670 for (i = 0; i < size; i++) {
671 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 buff = buff + ETH_GSTRING_LEN;
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
680 struct hnae3_handle *handle;
683 handle = &hdev->vport[0].nic;
684 if (handle->client) {
685 status = hclge_tqps_update_stats(handle);
687 dev_err(&hdev->pdev->dev,
688 "Update TQPS stats fail, status = %d.\n",
693 status = hclge_mac_update_stats(hdev);
695 dev_err(&hdev->pdev->dev,
696 "Update MAC stats fail, status = %d.\n", status);
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 struct net_device_stats *net_stats)
702 struct hclge_vport *vport = hclge_get_vport(handle);
703 struct hclge_dev *hdev = vport->back;
706 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
709 status = hclge_mac_update_stats(hdev);
711 dev_err(&hdev->pdev->dev,
712 "Update MAC stats fail, status = %d.\n",
715 status = hclge_tqps_update_stats(handle);
717 dev_err(&hdev->pdev->dev,
718 "Update TQPS stats fail, status = %d.\n",
721 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 HNAE3_SUPPORT_PHY_LOOPBACK |\
728 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
731 struct hclge_vport *vport = hclge_get_vport(handle);
732 struct hclge_dev *hdev = vport->back;
735 /* Loopback test support rules:
736 * mac: only GE mode support
737 * serdes: all mac mode will support include GE/XGE/LGE/CGE
738 * phy: only support when phy device exist on board
740 if (stringset == ETH_SS_TEST) {
741 /* clear loopback bit flags at first */
742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
748 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
755 if (hdev->hw.mac.phydev) {
757 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
760 } else if (stringset == ETH_SS_STATS) {
761 count = ARRAY_SIZE(g_mac_stats_string) +
762 hclge_tqps_get_sset_count(handle, stringset);
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
771 u8 *p = (char *)data;
774 if (stringset == ETH_SS_STATS) {
775 size = ARRAY_SIZE(g_mac_stats_string);
776 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778 p = hclge_tqps_get_strings(handle, p);
779 } else if (stringset == ETH_SS_TEST) {
780 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783 p += ETH_GSTRING_LEN;
785 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788 p += ETH_GSTRING_LEN;
790 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794 p += ETH_GSTRING_LEN;
796 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799 p += ETH_GSTRING_LEN;
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 struct hclge_vport *vport = hclge_get_vport(handle);
807 struct hclge_dev *hdev = vport->back;
810 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811 ARRAY_SIZE(g_mac_stats_string), data);
812 p = hclge_tqps_get_stats(handle, p);
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 struct hns3_mac_stats *mac_stats)
818 struct hclge_vport *vport = hclge_get_vport(handle);
819 struct hclge_dev *hdev = vport->back;
821 hclge_update_stats(handle, NULL);
823 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828 struct hclge_func_status_cmd *status)
830 #define HCLGE_MAC_ID_MASK 0xF
832 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
835 /* Set the pf to main pf */
836 if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 hdev->flag |= HCLGE_FLAG_MAIN;
839 hdev->flag &= ~HCLGE_FLAG_MAIN;
841 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
845 static int hclge_query_function_status(struct hclge_dev *hdev)
847 #define HCLGE_QUERY_MAX_CNT 5
849 struct hclge_func_status_cmd *req;
850 struct hclge_desc desc;
854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855 req = (struct hclge_func_status_cmd *)desc.data;
858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860 dev_err(&hdev->pdev->dev,
861 "query function status failed %d.\n", ret);
865 /* Check pf reset is done */
868 usleep_range(1000, 2000);
869 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871 return hclge_parse_func_status(hdev, req);
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 struct hclge_pf_res_cmd *req;
877 struct hclge_desc desc;
880 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883 dev_err(&hdev->pdev->dev,
884 "query pf resource failed %d.\n", ret);
888 req = (struct hclge_pf_res_cmd *)desc.data;
889 hdev->num_tqps = le16_to_cpu(req->tqp_num);
890 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 if (req->tx_buf_size)
894 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900 if (req->dv_buf_size)
902 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908 if (hnae3_dev_roce_supported(hdev)) {
909 hdev->roce_base_msix_offset =
910 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
911 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
913 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
914 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
916 /* nic's msix numbers is always equals to the roce's. */
917 hdev->num_nic_msi = hdev->num_roce_msi;
919 /* PF should have NIC vectors and Roce vectors,
920 * NIC vectors are queued before Roce vectors.
922 hdev->num_msi = hdev->num_roce_msi +
923 hdev->roce_base_msix_offset;
926 hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
927 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
929 hdev->num_nic_msi = hdev->num_msi;
932 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
933 dev_err(&hdev->pdev->dev,
934 "Just %u msi resources, not enough for pf(min:2).\n",
942 static int hclge_parse_speed(int speed_cmd, int *speed)
946 *speed = HCLGE_MAC_SPEED_10M;
949 *speed = HCLGE_MAC_SPEED_100M;
952 *speed = HCLGE_MAC_SPEED_1G;
955 *speed = HCLGE_MAC_SPEED_10G;
958 *speed = HCLGE_MAC_SPEED_25G;
961 *speed = HCLGE_MAC_SPEED_40G;
964 *speed = HCLGE_MAC_SPEED_50G;
967 *speed = HCLGE_MAC_SPEED_100G;
970 *speed = HCLGE_MAC_SPEED_200G;
979 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
981 struct hclge_vport *vport = hclge_get_vport(handle);
982 struct hclge_dev *hdev = vport->back;
983 u32 speed_ability = hdev->hw.mac.speed_ability;
987 case HCLGE_MAC_SPEED_10M:
988 speed_bit = HCLGE_SUPPORT_10M_BIT;
990 case HCLGE_MAC_SPEED_100M:
991 speed_bit = HCLGE_SUPPORT_100M_BIT;
993 case HCLGE_MAC_SPEED_1G:
994 speed_bit = HCLGE_SUPPORT_1G_BIT;
996 case HCLGE_MAC_SPEED_10G:
997 speed_bit = HCLGE_SUPPORT_10G_BIT;
999 case HCLGE_MAC_SPEED_25G:
1000 speed_bit = HCLGE_SUPPORT_25G_BIT;
1002 case HCLGE_MAC_SPEED_40G:
1003 speed_bit = HCLGE_SUPPORT_40G_BIT;
1005 case HCLGE_MAC_SPEED_50G:
1006 speed_bit = HCLGE_SUPPORT_50G_BIT;
1008 case HCLGE_MAC_SPEED_100G:
1009 speed_bit = HCLGE_SUPPORT_100G_BIT;
1011 case HCLGE_MAC_SPEED_200G:
1012 speed_bit = HCLGE_SUPPORT_200G_BIT;
1018 if (speed_bit & speed_ability)
1024 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1026 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1027 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1029 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1030 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1032 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1033 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1035 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1036 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1038 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1041 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1046 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1048 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1051 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1054 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1057 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1060 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1063 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1065 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1069 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1071 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1072 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1074 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1075 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1077 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1078 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1080 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1081 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1083 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1084 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1086 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1087 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1091 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1093 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1096 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1099 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1102 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1105 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1106 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1108 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1109 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1111 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1112 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1116 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1118 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1119 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1121 switch (mac->speed) {
1122 case HCLGE_MAC_SPEED_10G:
1123 case HCLGE_MAC_SPEED_40G:
1124 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1127 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1129 case HCLGE_MAC_SPEED_25G:
1130 case HCLGE_MAC_SPEED_50G:
1131 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1134 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1135 BIT(HNAE3_FEC_AUTO);
1137 case HCLGE_MAC_SPEED_100G:
1138 case HCLGE_MAC_SPEED_200G:
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1140 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1143 mac->fec_ability = 0;
1148 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1151 struct hclge_mac *mac = &hdev->hw.mac;
1153 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1154 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1157 hclge_convert_setting_sr(mac, speed_ability);
1158 hclge_convert_setting_lr(mac, speed_ability);
1159 hclge_convert_setting_cr(mac, speed_ability);
1160 if (hnae3_dev_fec_supported(hdev))
1161 hclge_convert_setting_fec(mac);
1163 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1164 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1165 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1168 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1171 struct hclge_mac *mac = &hdev->hw.mac;
1173 hclge_convert_setting_kr(mac, speed_ability);
1174 if (hnae3_dev_fec_supported(hdev))
1175 hclge_convert_setting_fec(mac);
1176 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1177 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1181 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1184 unsigned long *supported = hdev->hw.mac.supported;
1186 /* default to support all speed for GE port */
1188 speed_ability = HCLGE_SUPPORT_GE;
1190 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1191 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1194 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1195 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1197 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1201 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1202 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1206 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1208 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1209 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1212 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1214 u8 media_type = hdev->hw.mac.media_type;
1216 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1217 hclge_parse_fiber_link_mode(hdev, speed_ability);
1218 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1219 hclge_parse_copper_link_mode(hdev, speed_ability);
1220 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1221 hclge_parse_backplane_link_mode(hdev, speed_ability);
1224 static u32 hclge_get_max_speed(u16 speed_ability)
1226 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1227 return HCLGE_MAC_SPEED_200G;
1229 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1230 return HCLGE_MAC_SPEED_100G;
1232 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1233 return HCLGE_MAC_SPEED_50G;
1235 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1236 return HCLGE_MAC_SPEED_40G;
1238 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1239 return HCLGE_MAC_SPEED_25G;
1241 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1242 return HCLGE_MAC_SPEED_10G;
1244 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1245 return HCLGE_MAC_SPEED_1G;
1247 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1248 return HCLGE_MAC_SPEED_100M;
1250 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1251 return HCLGE_MAC_SPEED_10M;
1253 return HCLGE_MAC_SPEED_1G;
1256 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1258 #define SPEED_ABILITY_EXT_SHIFT 8
1260 struct hclge_cfg_param_cmd *req;
1261 u64 mac_addr_tmp_high;
1262 u16 speed_ability_ext;
1266 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1268 /* get the configuration */
1269 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1272 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1273 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1274 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1275 HCLGE_CFG_TQP_DESC_N_M,
1276 HCLGE_CFG_TQP_DESC_N_S);
1278 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279 HCLGE_CFG_PHY_ADDR_M,
1280 HCLGE_CFG_PHY_ADDR_S);
1281 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 HCLGE_CFG_MEDIA_TP_M,
1283 HCLGE_CFG_MEDIA_TP_S);
1284 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 HCLGE_CFG_RX_BUF_LEN_M,
1286 HCLGE_CFG_RX_BUF_LEN_S);
1287 /* get mac_address */
1288 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1289 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1290 HCLGE_CFG_MAC_ADDR_H_M,
1291 HCLGE_CFG_MAC_ADDR_H_S);
1293 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1295 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1296 HCLGE_CFG_DEFAULT_SPEED_M,
1297 HCLGE_CFG_DEFAULT_SPEED_S);
1298 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1299 HCLGE_CFG_RSS_SIZE_M,
1300 HCLGE_CFG_RSS_SIZE_S);
1302 for (i = 0; i < ETH_ALEN; i++)
1303 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1305 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1306 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1308 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1309 HCLGE_CFG_SPEED_ABILITY_M,
1310 HCLGE_CFG_SPEED_ABILITY_S);
1311 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1312 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1313 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1314 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1316 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1317 HCLGE_CFG_UMV_TBL_SPACE_M,
1318 HCLGE_CFG_UMV_TBL_SPACE_S);
1319 if (!cfg->umv_space)
1320 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1323 /* hclge_get_cfg: query the static parameter from flash
1324 * @hdev: pointer to struct hclge_dev
1325 * @hcfg: the config structure to be getted
1327 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1329 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1330 struct hclge_cfg_param_cmd *req;
1334 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1337 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1338 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1340 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1341 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1342 /* Len should be united by 4 bytes when send to hardware */
1343 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1344 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1345 req->offset = cpu_to_le32(offset);
1348 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1350 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1354 hclge_parse_cfg(hcfg, desc);
1359 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1361 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1363 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1365 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1366 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1367 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1368 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1369 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1372 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1373 struct hclge_desc *desc)
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1376 struct hclge_dev_specs_0_cmd *req0;
1377 struct hclge_dev_specs_1_cmd *req1;
1379 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1380 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1382 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1383 ae_dev->dev_specs.rss_ind_tbl_size =
1384 le16_to_cpu(req0->rss_ind_tbl_size);
1385 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1386 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1387 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1388 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1391 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1393 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1395 if (!dev_specs->max_non_tso_bd_num)
1396 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1397 if (!dev_specs->rss_ind_tbl_size)
1398 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1399 if (!dev_specs->rss_key_size)
1400 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1401 if (!dev_specs->max_tm_rate)
1402 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1403 if (!dev_specs->max_int_gl)
1404 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1407 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1409 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1413 /* set default specifications as devices lower than version V3 do not
1414 * support querying specifications from firmware.
1416 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1417 hclge_set_default_dev_specs(hdev);
1421 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1422 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1424 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1426 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1428 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1432 hclge_parse_dev_specs(hdev, desc);
1433 hclge_check_dev_specs(hdev);
1438 static int hclge_get_cap(struct hclge_dev *hdev)
1442 ret = hclge_query_function_status(hdev);
1444 dev_err(&hdev->pdev->dev,
1445 "query function status error %d.\n", ret);
1449 /* get pf resource */
1450 return hclge_query_pf_resource(hdev);
1453 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1455 #define HCLGE_MIN_TX_DESC 64
1456 #define HCLGE_MIN_RX_DESC 64
1458 if (!is_kdump_kernel())
1461 dev_info(&hdev->pdev->dev,
1462 "Running kdump kernel. Using minimal resources\n");
1464 /* minimal queue pairs equals to the number of vports */
1465 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1466 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1467 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1470 static int hclge_configure(struct hclge_dev *hdev)
1472 struct hclge_cfg cfg;
1476 ret = hclge_get_cfg(hdev, &cfg);
1480 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1481 hdev->base_tqp_pid = 0;
1482 hdev->rss_size_max = cfg.rss_size_max;
1483 hdev->rx_buf_len = cfg.rx_buf_len;
1484 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1485 hdev->hw.mac.media_type = cfg.media_type;
1486 hdev->hw.mac.phy_addr = cfg.phy_addr;
1487 hdev->num_tx_desc = cfg.tqp_desc_num;
1488 hdev->num_rx_desc = cfg.tqp_desc_num;
1489 hdev->tm_info.num_pg = 1;
1490 hdev->tc_max = cfg.tc_num;
1491 hdev->tm_info.hw_pfc_map = 0;
1492 hdev->wanted_umv_size = cfg.umv_space;
1494 if (hnae3_dev_fd_supported(hdev)) {
1496 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1499 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1501 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1502 cfg.default_speed, ret);
1506 hclge_parse_link_mode(hdev, cfg.speed_ability);
1508 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1510 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1511 (hdev->tc_max < 1)) {
1512 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1517 /* Dev does not support DCB */
1518 if (!hnae3_dev_dcb_supported(hdev)) {
1522 hdev->pfc_max = hdev->tc_max;
1525 hdev->tm_info.num_tc = 1;
1527 /* Currently not support uncontiuous tc */
1528 for (i = 0; i < hdev->tm_info.num_tc; i++)
1529 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1531 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1533 hclge_init_kdump_kernel_config(hdev);
1535 /* Set the init affinity based on pci func number */
1536 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1537 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1538 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1539 &hdev->affinity_mask);
1544 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1547 struct hclge_cfg_tso_status_cmd *req;
1548 struct hclge_desc desc;
1550 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1552 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1553 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1554 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1556 return hclge_cmd_send(&hdev->hw, &desc, 1);
1559 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1561 struct hclge_cfg_gro_status_cmd *req;
1562 struct hclge_desc desc;
1565 if (!hnae3_dev_gro_supported(hdev))
1568 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1569 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1571 req->gro_en = en ? 1 : 0;
1573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1575 dev_err(&hdev->pdev->dev,
1576 "GRO hardware config cmd failed, ret = %d\n", ret);
1581 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1583 struct hclge_tqp *tqp;
1586 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1587 sizeof(struct hclge_tqp), GFP_KERNEL);
1593 for (i = 0; i < hdev->num_tqps; i++) {
1594 tqp->dev = &hdev->pdev->dev;
1597 tqp->q.ae_algo = &ae_algo;
1598 tqp->q.buf_size = hdev->rx_buf_len;
1599 tqp->q.tx_desc_num = hdev->num_tx_desc;
1600 tqp->q.rx_desc_num = hdev->num_rx_desc;
1601 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1602 i * HCLGE_TQP_REG_SIZE;
1610 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1611 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1613 struct hclge_tqp_map_cmd *req;
1614 struct hclge_desc desc;
1617 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1619 req = (struct hclge_tqp_map_cmd *)desc.data;
1620 req->tqp_id = cpu_to_le16(tqp_pid);
1621 req->tqp_vf = func_id;
1622 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1624 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1625 req->tqp_vid = cpu_to_le16(tqp_vid);
1627 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1634 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1636 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1637 struct hclge_dev *hdev = vport->back;
1640 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1641 alloced < num_tqps; i++) {
1642 if (!hdev->htqp[i].alloced) {
1643 hdev->htqp[i].q.handle = &vport->nic;
1644 hdev->htqp[i].q.tqp_index = alloced;
1645 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1646 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1647 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1648 hdev->htqp[i].alloced = true;
1652 vport->alloc_tqps = alloced;
1653 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1654 vport->alloc_tqps / hdev->tm_info.num_tc);
1656 /* ensure one to one mapping between irq and queue at default */
1657 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1658 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1663 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1664 u16 num_tx_desc, u16 num_rx_desc)
1667 struct hnae3_handle *nic = &vport->nic;
1668 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1669 struct hclge_dev *hdev = vport->back;
1672 kinfo->num_tx_desc = num_tx_desc;
1673 kinfo->num_rx_desc = num_rx_desc;
1675 kinfo->rx_buf_len = hdev->rx_buf_len;
1677 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1678 sizeof(struct hnae3_queue *), GFP_KERNEL);
1682 ret = hclge_assign_tqp(vport, num_tqps);
1684 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1689 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1690 struct hclge_vport *vport)
1692 struct hnae3_handle *nic = &vport->nic;
1693 struct hnae3_knic_private_info *kinfo;
1696 kinfo = &nic->kinfo;
1697 for (i = 0; i < vport->alloc_tqps; i++) {
1698 struct hclge_tqp *q =
1699 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1703 is_pf = !(vport->vport_id);
1704 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1713 static int hclge_map_tqp(struct hclge_dev *hdev)
1715 struct hclge_vport *vport = hdev->vport;
1718 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1719 for (i = 0; i < num_vport; i++) {
1722 ret = hclge_map_tqp_to_vport(hdev, vport);
1732 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1734 struct hnae3_handle *nic = &vport->nic;
1735 struct hclge_dev *hdev = vport->back;
1738 nic->pdev = hdev->pdev;
1739 nic->ae_algo = &ae_algo;
1740 nic->numa_node_mask = hdev->numa_node_mask;
1742 ret = hclge_knic_setup(vport, num_tqps,
1743 hdev->num_tx_desc, hdev->num_rx_desc);
1745 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1750 static int hclge_alloc_vport(struct hclge_dev *hdev)
1752 struct pci_dev *pdev = hdev->pdev;
1753 struct hclge_vport *vport;
1759 /* We need to alloc a vport for main NIC of PF */
1760 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1762 if (hdev->num_tqps < num_vport) {
1763 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1764 hdev->num_tqps, num_vport);
1768 /* Alloc the same number of TQPs for every vport */
1769 tqp_per_vport = hdev->num_tqps / num_vport;
1770 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1772 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1777 hdev->vport = vport;
1778 hdev->num_alloc_vport = num_vport;
1780 if (IS_ENABLED(CONFIG_PCI_IOV))
1781 hdev->num_alloc_vfs = hdev->num_req_vfs;
1783 for (i = 0; i < num_vport; i++) {
1785 vport->vport_id = i;
1786 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1787 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1788 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1789 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1790 INIT_LIST_HEAD(&vport->vlan_list);
1791 INIT_LIST_HEAD(&vport->uc_mac_list);
1792 INIT_LIST_HEAD(&vport->mc_mac_list);
1793 spin_lock_init(&vport->mac_list_lock);
1796 ret = hclge_vport_setup(vport, tqp_main_vport);
1798 ret = hclge_vport_setup(vport, tqp_per_vport);
1801 "vport setup failed for vport %d, %d\n",
1812 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1813 struct hclge_pkt_buf_alloc *buf_alloc)
1815 /* TX buffer size is unit by 128 byte */
1816 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1817 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1818 struct hclge_tx_buff_alloc_cmd *req;
1819 struct hclge_desc desc;
1823 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1825 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1826 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1827 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1829 req->tx_pkt_buff[i] =
1830 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1831 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1834 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1836 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1842 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1843 struct hclge_pkt_buf_alloc *buf_alloc)
1845 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1848 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1853 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1858 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1859 if (hdev->hw_tc_map & BIT(i))
1864 /* Get the number of pfc enabled TCs, which have private buffer */
1865 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1866 struct hclge_pkt_buf_alloc *buf_alloc)
1868 struct hclge_priv_buf *priv;
1872 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873 priv = &buf_alloc->priv_buf[i];
1874 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1882 /* Get the number of pfc disabled TCs, which have private buffer */
1883 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1884 struct hclge_pkt_buf_alloc *buf_alloc)
1886 struct hclge_priv_buf *priv;
1890 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891 priv = &buf_alloc->priv_buf[i];
1892 if (hdev->hw_tc_map & BIT(i) &&
1893 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1901 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1903 struct hclge_priv_buf *priv;
1907 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1908 priv = &buf_alloc->priv_buf[i];
1910 rx_priv += priv->buf_size;
1915 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1917 u32 i, total_tx_size = 0;
1919 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1920 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1922 return total_tx_size;
1925 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1926 struct hclge_pkt_buf_alloc *buf_alloc,
1929 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1930 u32 tc_num = hclge_get_tc_num(hdev);
1931 u32 shared_buf, aligned_mps;
1935 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1937 if (hnae3_dev_dcb_supported(hdev))
1938 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1941 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1942 + hdev->dv_buf_size;
1944 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1945 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1946 HCLGE_BUF_SIZE_UNIT);
1948 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1949 if (rx_all < rx_priv + shared_std)
1952 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1953 buf_alloc->s_buf.buf_size = shared_buf;
1954 if (hnae3_dev_dcb_supported(hdev)) {
1955 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1956 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1957 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1958 HCLGE_BUF_SIZE_UNIT);
1960 buf_alloc->s_buf.self.high = aligned_mps +
1961 HCLGE_NON_DCB_ADDITIONAL_BUF;
1962 buf_alloc->s_buf.self.low = aligned_mps;
1965 if (hnae3_dev_dcb_supported(hdev)) {
1966 hi_thrd = shared_buf - hdev->dv_buf_size;
1968 if (tc_num <= NEED_RESERVE_TC_NUM)
1969 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1973 hi_thrd = hi_thrd / tc_num;
1975 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1976 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1977 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1979 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1980 lo_thrd = aligned_mps;
1983 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1984 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1985 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1991 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1992 struct hclge_pkt_buf_alloc *buf_alloc)
1996 total_size = hdev->pkt_buf_size;
1998 /* alloc tx buffer for all enabled tc */
1999 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2000 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2002 if (hdev->hw_tc_map & BIT(i)) {
2003 if (total_size < hdev->tx_buf_size)
2006 priv->tx_buf_size = hdev->tx_buf_size;
2008 priv->tx_buf_size = 0;
2011 total_size -= priv->tx_buf_size;
2017 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2018 struct hclge_pkt_buf_alloc *buf_alloc)
2020 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2021 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2024 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2025 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2032 if (!(hdev->hw_tc_map & BIT(i)))
2037 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2038 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2039 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2040 HCLGE_BUF_SIZE_UNIT);
2043 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2047 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2050 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2053 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2054 struct hclge_pkt_buf_alloc *buf_alloc)
2056 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2057 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2060 /* let the last to be cleared first */
2061 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2062 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2063 unsigned int mask = BIT((unsigned int)i);
2065 if (hdev->hw_tc_map & mask &&
2066 !(hdev->tm_info.hw_pfc_map & mask)) {
2067 /* Clear the no pfc TC private buffer */
2075 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2076 no_pfc_priv_num == 0)
2080 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2083 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2084 struct hclge_pkt_buf_alloc *buf_alloc)
2086 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2087 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2090 /* let the last to be cleared first */
2091 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2092 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2093 unsigned int mask = BIT((unsigned int)i);
2095 if (hdev->hw_tc_map & mask &&
2096 hdev->tm_info.hw_pfc_map & mask) {
2097 /* Reduce the number of pfc TC with private buffer */
2105 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2110 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2113 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2114 struct hclge_pkt_buf_alloc *buf_alloc)
2116 #define COMPENSATE_BUFFER 0x3C00
2117 #define COMPENSATE_HALF_MPS_NUM 5
2118 #define PRIV_WL_GAP 0x1800
2120 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 u32 tc_num = hclge_get_tc_num(hdev);
2122 u32 half_mps = hdev->mps >> 1;
2127 rx_priv = rx_priv / tc_num;
2129 if (tc_num <= NEED_RESERVE_TC_NUM)
2130 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2132 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2133 COMPENSATE_HALF_MPS_NUM * half_mps;
2134 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2135 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2137 if (rx_priv < min_rx_priv)
2140 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2141 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2148 if (!(hdev->hw_tc_map & BIT(i)))
2152 priv->buf_size = rx_priv;
2153 priv->wl.high = rx_priv - hdev->dv_buf_size;
2154 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2157 buf_alloc->s_buf.buf_size = 0;
2162 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2163 * @hdev: pointer to struct hclge_dev
2164 * @buf_alloc: pointer to buffer calculation data
2165 * @return: 0: calculate sucessful, negative: fail
2167 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2168 struct hclge_pkt_buf_alloc *buf_alloc)
2170 /* When DCB is not supported, rx private buffer is not allocated. */
2171 if (!hnae3_dev_dcb_supported(hdev)) {
2172 u32 rx_all = hdev->pkt_buf_size;
2174 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2175 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2181 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2184 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2187 /* try to decrease the buffer size */
2188 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2191 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2194 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2200 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2201 struct hclge_pkt_buf_alloc *buf_alloc)
2203 struct hclge_rx_priv_buff_cmd *req;
2204 struct hclge_desc desc;
2208 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2209 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2211 /* Alloc private buffer TCs */
2212 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2213 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2216 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2218 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2222 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2223 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2225 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 dev_err(&hdev->pdev->dev,
2228 "rx private buffer alloc cmd failed %d\n", ret);
2233 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2234 struct hclge_pkt_buf_alloc *buf_alloc)
2236 struct hclge_rx_priv_wl_buf *req;
2237 struct hclge_priv_buf *priv;
2238 struct hclge_desc desc[2];
2242 for (i = 0; i < 2; i++) {
2243 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2245 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2247 /* The first descriptor set the NEXT bit to 1 */
2249 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2251 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2253 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2254 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2256 priv = &buf_alloc->priv_buf[idx];
2257 req->tc_wl[j].high =
2258 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2259 req->tc_wl[j].high |=
2260 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2262 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2263 req->tc_wl[j].low |=
2264 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2268 /* Send 2 descriptor at one time */
2269 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2271 dev_err(&hdev->pdev->dev,
2272 "rx private waterline config cmd failed %d\n",
2277 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2278 struct hclge_pkt_buf_alloc *buf_alloc)
2280 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2281 struct hclge_rx_com_thrd *req;
2282 struct hclge_desc desc[2];
2283 struct hclge_tc_thrd *tc;
2287 for (i = 0; i < 2; i++) {
2288 hclge_cmd_setup_basic_desc(&desc[i],
2289 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2290 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2292 /* The first descriptor set the NEXT bit to 1 */
2294 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2296 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2298 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2299 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2301 req->com_thrd[j].high =
2302 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2303 req->com_thrd[j].high |=
2304 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2305 req->com_thrd[j].low =
2306 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2307 req->com_thrd[j].low |=
2308 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2312 /* Send 2 descriptors at one time */
2313 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2315 dev_err(&hdev->pdev->dev,
2316 "common threshold config cmd failed %d\n", ret);
2320 static int hclge_common_wl_config(struct hclge_dev *hdev,
2321 struct hclge_pkt_buf_alloc *buf_alloc)
2323 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2324 struct hclge_rx_com_wl *req;
2325 struct hclge_desc desc;
2328 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2330 req = (struct hclge_rx_com_wl *)desc.data;
2331 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2332 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2334 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2335 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2339 dev_err(&hdev->pdev->dev,
2340 "common waterline config cmd failed %d\n", ret);
2345 int hclge_buffer_alloc(struct hclge_dev *hdev)
2347 struct hclge_pkt_buf_alloc *pkt_buf;
2350 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2354 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2356 dev_err(&hdev->pdev->dev,
2357 "could not calc tx buffer size for all TCs %d\n", ret);
2361 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2363 dev_err(&hdev->pdev->dev,
2364 "could not alloc tx buffers %d\n", ret);
2368 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2370 dev_err(&hdev->pdev->dev,
2371 "could not calc rx priv buffer size for all TCs %d\n",
2376 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2378 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2383 if (hnae3_dev_dcb_supported(hdev)) {
2384 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2386 dev_err(&hdev->pdev->dev,
2387 "could not configure rx private waterline %d\n",
2392 ret = hclge_common_thrd_config(hdev, pkt_buf);
2394 dev_err(&hdev->pdev->dev,
2395 "could not configure common threshold %d\n",
2401 ret = hclge_common_wl_config(hdev, pkt_buf);
2403 dev_err(&hdev->pdev->dev,
2404 "could not configure common waterline %d\n", ret);
2411 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2413 struct hnae3_handle *roce = &vport->roce;
2414 struct hnae3_handle *nic = &vport->nic;
2416 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2418 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2419 vport->back->num_msi_left == 0)
2422 roce->rinfo.base_vector = vport->back->roce_base_vector;
2424 roce->rinfo.netdev = nic->kinfo.netdev;
2425 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2427 roce->pdev = nic->pdev;
2428 roce->ae_algo = nic->ae_algo;
2429 roce->numa_node_mask = nic->numa_node_mask;
2434 static int hclge_init_msi(struct hclge_dev *hdev)
2436 struct pci_dev *pdev = hdev->pdev;
2440 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2442 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2445 "failed(%d) to allocate MSI/MSI-X vectors\n",
2449 if (vectors < hdev->num_msi)
2450 dev_warn(&hdev->pdev->dev,
2451 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2452 hdev->num_msi, vectors);
2454 hdev->num_msi = vectors;
2455 hdev->num_msi_left = vectors;
2457 hdev->base_msi_vector = pdev->irq;
2458 hdev->roce_base_vector = hdev->base_msi_vector +
2459 hdev->roce_base_msix_offset;
2461 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2462 sizeof(u16), GFP_KERNEL);
2463 if (!hdev->vector_status) {
2464 pci_free_irq_vectors(pdev);
2468 for (i = 0; i < hdev->num_msi; i++)
2469 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2471 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2472 sizeof(int), GFP_KERNEL);
2473 if (!hdev->vector_irq) {
2474 pci_free_irq_vectors(pdev);
2481 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2483 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2484 duplex = HCLGE_MAC_FULL;
2489 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2492 struct hclge_config_mac_speed_dup_cmd *req;
2493 struct hclge_desc desc;
2496 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2498 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2501 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2504 case HCLGE_MAC_SPEED_10M:
2505 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2506 HCLGE_CFG_SPEED_S, 6);
2508 case HCLGE_MAC_SPEED_100M:
2509 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2510 HCLGE_CFG_SPEED_S, 7);
2512 case HCLGE_MAC_SPEED_1G:
2513 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2514 HCLGE_CFG_SPEED_S, 0);
2516 case HCLGE_MAC_SPEED_10G:
2517 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2518 HCLGE_CFG_SPEED_S, 1);
2520 case HCLGE_MAC_SPEED_25G:
2521 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2522 HCLGE_CFG_SPEED_S, 2);
2524 case HCLGE_MAC_SPEED_40G:
2525 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526 HCLGE_CFG_SPEED_S, 3);
2528 case HCLGE_MAC_SPEED_50G:
2529 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530 HCLGE_CFG_SPEED_S, 4);
2532 case HCLGE_MAC_SPEED_100G:
2533 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534 HCLGE_CFG_SPEED_S, 5);
2536 case HCLGE_MAC_SPEED_200G:
2537 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2538 HCLGE_CFG_SPEED_S, 8);
2541 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2545 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2550 dev_err(&hdev->pdev->dev,
2551 "mac speed/duplex config cmd failed %d.\n", ret);
2558 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2560 struct hclge_mac *mac = &hdev->hw.mac;
2563 duplex = hclge_check_speed_dup(duplex, speed);
2564 if (!mac->support_autoneg && mac->speed == speed &&
2565 mac->duplex == duplex)
2568 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2572 hdev->hw.mac.speed = speed;
2573 hdev->hw.mac.duplex = duplex;
2578 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2581 struct hclge_vport *vport = hclge_get_vport(handle);
2582 struct hclge_dev *hdev = vport->back;
2584 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2587 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2589 struct hclge_config_auto_neg_cmd *req;
2590 struct hclge_desc desc;
2594 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2596 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2598 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2599 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2601 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2603 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2609 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2611 struct hclge_vport *vport = hclge_get_vport(handle);
2612 struct hclge_dev *hdev = vport->back;
2614 if (!hdev->hw.mac.support_autoneg) {
2616 dev_err(&hdev->pdev->dev,
2617 "autoneg is not supported by current port\n");
2624 return hclge_set_autoneg_en(hdev, enable);
2627 static int hclge_get_autoneg(struct hnae3_handle *handle)
2629 struct hclge_vport *vport = hclge_get_vport(handle);
2630 struct hclge_dev *hdev = vport->back;
2631 struct phy_device *phydev = hdev->hw.mac.phydev;
2634 return phydev->autoneg;
2636 return hdev->hw.mac.autoneg;
2639 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2641 struct hclge_vport *vport = hclge_get_vport(handle);
2642 struct hclge_dev *hdev = vport->back;
2645 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2647 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2650 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2653 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2655 struct hclge_vport *vport = hclge_get_vport(handle);
2656 struct hclge_dev *hdev = vport->back;
2658 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2659 return hclge_set_autoneg_en(hdev, !halt);
2664 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2666 struct hclge_config_fec_cmd *req;
2667 struct hclge_desc desc;
2670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2672 req = (struct hclge_config_fec_cmd *)desc.data;
2673 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2674 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2675 if (fec_mode & BIT(HNAE3_FEC_RS))
2676 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2677 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2678 if (fec_mode & BIT(HNAE3_FEC_BASER))
2679 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2680 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2682 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2684 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2689 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2693 struct hclge_mac *mac = &hdev->hw.mac;
2696 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2697 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2701 ret = hclge_set_fec_hw(hdev, fec_mode);
2705 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2709 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2712 struct hclge_vport *vport = hclge_get_vport(handle);
2713 struct hclge_dev *hdev = vport->back;
2714 struct hclge_mac *mac = &hdev->hw.mac;
2717 *fec_ability = mac->fec_ability;
2719 *fec_mode = mac->fec_mode;
2722 static int hclge_mac_init(struct hclge_dev *hdev)
2724 struct hclge_mac *mac = &hdev->hw.mac;
2727 hdev->support_sfp_query = true;
2728 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2729 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2730 hdev->hw.mac.duplex);
2734 if (hdev->hw.mac.support_autoneg) {
2735 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2742 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2743 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2748 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2750 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2754 ret = hclge_set_default_loopback(hdev);
2758 ret = hclge_buffer_alloc(hdev);
2760 dev_err(&hdev->pdev->dev,
2761 "allocate buffer fail, ret=%d\n", ret);
2766 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2768 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2769 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2770 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2771 hclge_wq, &hdev->service_task, 0);
2774 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2776 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2777 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2778 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2779 hclge_wq, &hdev->service_task, 0);
2782 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2784 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2785 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2786 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2787 hclge_wq, &hdev->service_task,
2791 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2793 struct hclge_link_status_cmd *req;
2794 struct hclge_desc desc;
2797 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2798 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2800 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2805 req = (struct hclge_link_status_cmd *)desc.data;
2806 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2807 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2812 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2814 struct phy_device *phydev = hdev->hw.mac.phydev;
2816 *link_status = HCLGE_LINK_STATUS_DOWN;
2818 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2821 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2824 return hclge_get_mac_link_status(hdev, link_status);
2827 static void hclge_update_link_status(struct hclge_dev *hdev)
2829 struct hnae3_client *rclient = hdev->roce_client;
2830 struct hnae3_client *client = hdev->nic_client;
2831 struct hnae3_handle *rhandle;
2832 struct hnae3_handle *handle;
2840 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2843 ret = hclge_get_mac_phy_link(hdev, &state);
2845 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2849 if (state != hdev->hw.mac.link) {
2850 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2851 handle = &hdev->vport[i].nic;
2852 client->ops->link_status_change(handle, state);
2853 hclge_config_mac_tnl_int(hdev, state);
2854 rhandle = &hdev->vport[i].roce;
2855 if (rclient && rclient->ops->link_status_change)
2856 rclient->ops->link_status_change(rhandle,
2859 hdev->hw.mac.link = state;
2862 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2865 static void hclge_update_port_capability(struct hclge_mac *mac)
2867 /* update fec ability by speed */
2868 hclge_convert_setting_fec(mac);
2870 /* firmware can not identify back plane type, the media type
2871 * read from configuration can help deal it
2873 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2874 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2875 mac->module_type = HNAE3_MODULE_TYPE_KR;
2876 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2877 mac->module_type = HNAE3_MODULE_TYPE_TP;
2879 if (mac->support_autoneg) {
2880 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2881 linkmode_copy(mac->advertising, mac->supported);
2883 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2885 linkmode_zero(mac->advertising);
2889 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2891 struct hclge_sfp_info_cmd *resp;
2892 struct hclge_desc desc;
2895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2896 resp = (struct hclge_sfp_info_cmd *)desc.data;
2897 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2898 if (ret == -EOPNOTSUPP) {
2899 dev_warn(&hdev->pdev->dev,
2900 "IMP do not support get SFP speed %d\n", ret);
2903 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2907 *speed = le32_to_cpu(resp->speed);
2912 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2914 struct hclge_sfp_info_cmd *resp;
2915 struct hclge_desc desc;
2918 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2919 resp = (struct hclge_sfp_info_cmd *)desc.data;
2921 resp->query_type = QUERY_ACTIVE_SPEED;
2923 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2924 if (ret == -EOPNOTSUPP) {
2925 dev_warn(&hdev->pdev->dev,
2926 "IMP does not support get SFP info %d\n", ret);
2929 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2933 /* In some case, mac speed get from IMP may be 0, it shouldn't be
2934 * set to mac->speed.
2936 if (!le32_to_cpu(resp->speed))
2939 mac->speed = le32_to_cpu(resp->speed);
2940 /* if resp->speed_ability is 0, it means it's an old version
2941 * firmware, do not update these params
2943 if (resp->speed_ability) {
2944 mac->module_type = le32_to_cpu(resp->module_type);
2945 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2946 mac->autoneg = resp->autoneg;
2947 mac->support_autoneg = resp->autoneg_ability;
2948 mac->speed_type = QUERY_ACTIVE_SPEED;
2949 if (!resp->active_fec)
2952 mac->fec_mode = BIT(resp->active_fec);
2954 mac->speed_type = QUERY_SFP_SPEED;
2960 static int hclge_update_port_info(struct hclge_dev *hdev)
2962 struct hclge_mac *mac = &hdev->hw.mac;
2963 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2966 /* get the port info from SFP cmd if not copper port */
2967 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2970 /* if IMP does not support get SFP/qSFP info, return directly */
2971 if (!hdev->support_sfp_query)
2974 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2975 ret = hclge_get_sfp_info(hdev, mac);
2977 ret = hclge_get_sfp_speed(hdev, &speed);
2979 if (ret == -EOPNOTSUPP) {
2980 hdev->support_sfp_query = false;
2986 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2987 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2988 hclge_update_port_capability(mac);
2991 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2994 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2995 return 0; /* do nothing if no SFP */
2997 /* must config full duplex for SFP */
2998 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3002 static int hclge_get_status(struct hnae3_handle *handle)
3004 struct hclge_vport *vport = hclge_get_vport(handle);
3005 struct hclge_dev *hdev = vport->back;
3007 hclge_update_link_status(hdev);
3009 return hdev->hw.mac.link;
3012 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3014 if (!pci_num_vf(hdev->pdev)) {
3015 dev_err(&hdev->pdev->dev,
3016 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3020 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3021 dev_err(&hdev->pdev->dev,
3022 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3023 vf, pci_num_vf(hdev->pdev));
3027 /* VF start from 1 in vport */
3028 vf += HCLGE_VF_VPORT_START_NUM;
3029 return &hdev->vport[vf];
3032 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3033 struct ifla_vf_info *ivf)
3035 struct hclge_vport *vport = hclge_get_vport(handle);
3036 struct hclge_dev *hdev = vport->back;
3038 vport = hclge_get_vf_vport(hdev, vf);
3043 ivf->linkstate = vport->vf_info.link_state;
3044 ivf->spoofchk = vport->vf_info.spoofchk;
3045 ivf->trusted = vport->vf_info.trusted;
3046 ivf->min_tx_rate = 0;
3047 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3048 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3049 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3050 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3051 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3056 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3059 struct hclge_vport *vport = hclge_get_vport(handle);
3060 struct hclge_dev *hdev = vport->back;
3062 vport = hclge_get_vf_vport(hdev, vf);
3066 vport->vf_info.link_state = link_state;
3071 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3073 u32 cmdq_src_reg, msix_src_reg;
3075 /* fetch the events from their corresponding regs */
3076 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3077 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3079 /* Assumption: If by any chance reset and mailbox events are reported
3080 * together then we will only process reset event in this go and will
3081 * defer the processing of the mailbox events. Since, we would have not
3082 * cleared RX CMDQ event this time we would receive again another
3083 * interrupt from H/W just for the mailbox.
3085 * check for vector0 reset event sources
3087 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3088 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3089 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3090 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3091 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3092 hdev->rst_stats.imp_rst_cnt++;
3093 return HCLGE_VECTOR0_EVENT_RST;
3096 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3097 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3098 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3099 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3100 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3101 hdev->rst_stats.global_rst_cnt++;
3102 return HCLGE_VECTOR0_EVENT_RST;
3105 /* check for vector0 msix event source */
3106 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3107 *clearval = msix_src_reg;
3108 return HCLGE_VECTOR0_EVENT_ERR;
3111 /* check for vector0 mailbox(=CMDQ RX) event source */
3112 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3113 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3114 *clearval = cmdq_src_reg;
3115 return HCLGE_VECTOR0_EVENT_MBX;
3118 /* print other vector0 event source */
3119 dev_info(&hdev->pdev->dev,
3120 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3121 cmdq_src_reg, msix_src_reg);
3122 *clearval = msix_src_reg;
3124 return HCLGE_VECTOR0_EVENT_OTHER;
3127 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3130 switch (event_type) {
3131 case HCLGE_VECTOR0_EVENT_RST:
3132 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3134 case HCLGE_VECTOR0_EVENT_MBX:
3135 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3142 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3144 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3145 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3146 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3147 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3148 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3151 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3153 writel(enable ? 1 : 0, vector->addr);
3156 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3158 struct hclge_dev *hdev = data;
3162 hclge_enable_vector(&hdev->misc_vector, false);
3163 event_cause = hclge_check_event_cause(hdev, &clearval);
3165 /* vector 0 interrupt is shared with reset and mailbox source events.*/
3166 switch (event_cause) {
3167 case HCLGE_VECTOR0_EVENT_ERR:
3168 /* we do not know what type of reset is required now. This could
3169 * only be decided after we fetch the type of errors which
3170 * caused this event. Therefore, we will do below for now:
3171 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3172 * have defered type of reset to be used.
3173 * 2. Schedule the reset serivce task.
3174 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
3175 * will fetch the correct type of reset. This would be done
3176 * by first decoding the types of errors.
3178 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3180 case HCLGE_VECTOR0_EVENT_RST:
3181 hclge_reset_task_schedule(hdev);
3183 case HCLGE_VECTOR0_EVENT_MBX:
3184 /* If we are here then,
3185 * 1. Either we are not handling any mbx task and we are not
3188 * 2. We could be handling a mbx task but nothing more is
3190 * In both cases, we should schedule mbx task as there are more
3191 * mbx messages reported by this interrupt.
3193 hclge_mbx_task_schedule(hdev);
3196 dev_warn(&hdev->pdev->dev,
3197 "received unknown or unhandled event of vector0\n");
3201 hclge_clear_event_cause(hdev, event_cause, clearval);
3203 /* Enable interrupt if it is not cause by reset. And when
3204 * clearval equal to 0, it means interrupt status may be
3205 * cleared by hardware before driver reads status register.
3206 * For this case, vector0 interrupt also should be enabled.
3209 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3210 hclge_enable_vector(&hdev->misc_vector, true);
3216 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3218 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3219 dev_warn(&hdev->pdev->dev,
3220 "vector(vector_id %d) has been freed.\n", vector_id);
3224 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3225 hdev->num_msi_left += 1;
3226 hdev->num_msi_used -= 1;
3229 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3231 struct hclge_misc_vector *vector = &hdev->misc_vector;
3233 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3235 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3236 hdev->vector_status[0] = 0;
3238 hdev->num_msi_left -= 1;
3239 hdev->num_msi_used += 1;
3242 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3243 const cpumask_t *mask)
3245 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3248 cpumask_copy(&hdev->affinity_mask, mask);
3251 static void hclge_irq_affinity_release(struct kref *ref)
3255 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3257 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3258 &hdev->affinity_mask);
3260 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3261 hdev->affinity_notify.release = hclge_irq_affinity_release;
3262 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3263 &hdev->affinity_notify);
3266 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3268 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3269 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3272 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3276 hclge_get_misc_vector(hdev);
3278 /* this would be explicitly freed in the end */
3279 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3280 HCLGE_NAME, pci_name(hdev->pdev));
3281 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3282 0, hdev->misc_vector.name, hdev);
3284 hclge_free_vector(hdev, 0);
3285 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3286 hdev->misc_vector.vector_irq);
3292 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3294 free_irq(hdev->misc_vector.vector_irq, hdev);
3295 hclge_free_vector(hdev, 0);
3298 int hclge_notify_client(struct hclge_dev *hdev,
3299 enum hnae3_reset_notify_type type)
3301 struct hnae3_client *client = hdev->nic_client;
3304 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3307 if (!client->ops->reset_notify)
3310 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3311 struct hnae3_handle *handle = &hdev->vport[i].nic;
3314 ret = client->ops->reset_notify(handle, type);
3316 dev_err(&hdev->pdev->dev,
3317 "notify nic client failed %d(%d)\n", type, ret);
3325 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3326 enum hnae3_reset_notify_type type)
3328 struct hnae3_client *client = hdev->roce_client;
3332 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3335 if (!client->ops->reset_notify)
3338 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3339 struct hnae3_handle *handle = &hdev->vport[i].roce;
3341 ret = client->ops->reset_notify(handle, type);
3343 dev_err(&hdev->pdev->dev,
3344 "notify roce client failed %d(%d)",
3353 static int hclge_reset_wait(struct hclge_dev *hdev)
3355 #define HCLGE_RESET_WATI_MS 100
3356 #define HCLGE_RESET_WAIT_CNT 350
3358 u32 val, reg, reg_bit;
3361 switch (hdev->reset_type) {
3362 case HNAE3_IMP_RESET:
3363 reg = HCLGE_GLOBAL_RESET_REG;
3364 reg_bit = HCLGE_IMP_RESET_BIT;
3366 case HNAE3_GLOBAL_RESET:
3367 reg = HCLGE_GLOBAL_RESET_REG;
3368 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3370 case HNAE3_FUNC_RESET:
3371 reg = HCLGE_FUN_RST_ING;
3372 reg_bit = HCLGE_FUN_RST_ING_B;
3375 dev_err(&hdev->pdev->dev,
3376 "Wait for unsupported reset type: %d\n",
3381 val = hclge_read_dev(&hdev->hw, reg);
3382 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3383 msleep(HCLGE_RESET_WATI_MS);
3384 val = hclge_read_dev(&hdev->hw, reg);
3388 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3389 dev_warn(&hdev->pdev->dev,
3390 "Wait for reset timeout: %d\n", hdev->reset_type);
3397 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3399 struct hclge_vf_rst_cmd *req;
3400 struct hclge_desc desc;
3402 req = (struct hclge_vf_rst_cmd *)desc.data;
3403 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3404 req->dest_vfid = func_id;
3409 return hclge_cmd_send(&hdev->hw, &desc, 1);
3412 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3416 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3417 struct hclge_vport *vport = &hdev->vport[i];
3420 /* Send cmd to set/clear VF's FUNC_RST_ING */
3421 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3423 dev_err(&hdev->pdev->dev,
3424 "set vf(%u) rst failed %d!\n",
3425 vport->vport_id, ret);
3429 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3432 /* Inform VF to process the reset.
3433 * hclge_inform_reset_assert_to_vf may fail if VF
3434 * driver is not loaded.
3436 ret = hclge_inform_reset_assert_to_vf(vport);
3438 dev_warn(&hdev->pdev->dev,
3439 "inform reset to vf(%u) failed %d!\n",
3440 vport->vport_id, ret);
3446 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3448 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3449 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3450 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3453 hclge_mbx_handler(hdev);
3455 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3458 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3460 struct hclge_pf_rst_sync_cmd *req;
3461 struct hclge_desc desc;
3465 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3466 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3469 /* vf need to down netdev by mbx during PF or FLR reset */
3470 hclge_mailbox_service_task(hdev);
3472 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3473 /* for compatible with old firmware, wait
3474 * 100 ms for VF to stop IO
3476 if (ret == -EOPNOTSUPP) {
3477 msleep(HCLGE_RESET_SYNC_TIME);
3480 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3483 } else if (req->all_vf_ready) {
3486 msleep(HCLGE_PF_RESET_SYNC_TIME);
3487 hclge_cmd_reuse_desc(&desc, true);
3488 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3490 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3493 void hclge_report_hw_error(struct hclge_dev *hdev,
3494 enum hnae3_hw_error_type type)
3496 struct hnae3_client *client = hdev->nic_client;
3499 if (!client || !client->ops->process_hw_error ||
3500 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3503 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3504 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3507 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3511 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3512 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3513 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3514 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3515 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3518 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3519 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3520 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3521 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3525 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3527 struct hclge_desc desc;
3528 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3531 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3532 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3533 req->fun_reset_vfid = func_id;
3535 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3537 dev_err(&hdev->pdev->dev,
3538 "send function reset cmd fail, status =%d\n", ret);
3543 static void hclge_do_reset(struct hclge_dev *hdev)
3545 struct hnae3_handle *handle = &hdev->vport[0].nic;
3546 struct pci_dev *pdev = hdev->pdev;
3549 if (hclge_get_hw_reset_stat(handle)) {
3550 dev_info(&pdev->dev, "hardware reset not finish\n");
3551 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3552 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3553 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3557 switch (hdev->reset_type) {
3558 case HNAE3_GLOBAL_RESET:
3559 dev_info(&pdev->dev, "global reset requested\n");
3560 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3561 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3562 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3564 case HNAE3_FUNC_RESET:
3565 dev_info(&pdev->dev, "PF reset requested\n");
3566 /* schedule again to check later */
3567 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3568 hclge_reset_task_schedule(hdev);
3571 dev_warn(&pdev->dev,
3572 "unsupported reset type: %d\n", hdev->reset_type);
3577 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3578 unsigned long *addr)
3580 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3581 struct hclge_dev *hdev = ae_dev->priv;
3583 /* first, resolve any unknown reset type to the known type(s) */
3584 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3585 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3586 HCLGE_MISC_VECTOR_INT_STS);
3587 /* we will intentionally ignore any errors from this function
3588 * as we will end up in *some* reset request in any case
3590 if (hclge_handle_hw_msix_error(hdev, addr))
3591 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3594 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3595 /* We defered the clearing of the error event which caused
3596 * interrupt since it was not posssible to do that in
3597 * interrupt context (and this is the reason we introduced
3598 * new UNKNOWN reset type). Now, the errors have been
3599 * handled and cleared in hardware we can safely enable
3600 * interrupts. This is an exception to the norm.
3602 hclge_enable_vector(&hdev->misc_vector, true);
3605 /* return the highest priority reset level amongst all */
3606 if (test_bit(HNAE3_IMP_RESET, addr)) {
3607 rst_level = HNAE3_IMP_RESET;
3608 clear_bit(HNAE3_IMP_RESET, addr);
3609 clear_bit(HNAE3_GLOBAL_RESET, addr);
3610 clear_bit(HNAE3_FUNC_RESET, addr);
3611 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3612 rst_level = HNAE3_GLOBAL_RESET;
3613 clear_bit(HNAE3_GLOBAL_RESET, addr);
3614 clear_bit(HNAE3_FUNC_RESET, addr);
3615 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3616 rst_level = HNAE3_FUNC_RESET;
3617 clear_bit(HNAE3_FUNC_RESET, addr);
3618 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3619 rst_level = HNAE3_FLR_RESET;
3620 clear_bit(HNAE3_FLR_RESET, addr);
3623 if (hdev->reset_type != HNAE3_NONE_RESET &&
3624 rst_level < hdev->reset_type)
3625 return HNAE3_NONE_RESET;
3630 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3634 switch (hdev->reset_type) {
3635 case HNAE3_IMP_RESET:
3636 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3638 case HNAE3_GLOBAL_RESET:
3639 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3648 /* For revision 0x20, the reset interrupt source
3649 * can only be cleared after hardware reset done
3651 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3652 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3655 hclge_enable_vector(&hdev->misc_vector, true);
3658 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3662 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3664 reg_val |= HCLGE_NIC_SW_RST_RDY;
3666 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3668 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3671 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3675 ret = hclge_set_all_vf_rst(hdev, true);
3679 hclge_func_reset_sync_vf(hdev);
3684 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3689 switch (hdev->reset_type) {
3690 case HNAE3_FUNC_RESET:
3691 ret = hclge_func_reset_notify_vf(hdev);
3695 ret = hclge_func_reset_cmd(hdev, 0);
3697 dev_err(&hdev->pdev->dev,
3698 "asserting function reset fail %d!\n", ret);
3702 /* After performaning pf reset, it is not necessary to do the
3703 * mailbox handling or send any command to firmware, because
3704 * any mailbox handling or command to firmware is only valid
3705 * after hclge_cmd_init is called.
3707 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3708 hdev->rst_stats.pf_rst_cnt++;
3710 case HNAE3_FLR_RESET:
3711 ret = hclge_func_reset_notify_vf(hdev);
3715 case HNAE3_IMP_RESET:
3716 hclge_handle_imp_error(hdev);
3717 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3718 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3719 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3725 /* inform hardware that preparatory work is done */
3726 msleep(HCLGE_RESET_SYNC_TIME);
3727 hclge_reset_handshake(hdev, true);
3728 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3733 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3735 #define MAX_RESET_FAIL_CNT 5
3737 if (hdev->reset_pending) {
3738 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3739 hdev->reset_pending);
3741 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3742 HCLGE_RESET_INT_M) {
3743 dev_info(&hdev->pdev->dev,
3744 "reset failed because new reset interrupt\n");
3745 hclge_clear_reset_cause(hdev);
3747 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3748 hdev->rst_stats.reset_fail_cnt++;
3749 set_bit(hdev->reset_type, &hdev->reset_pending);
3750 dev_info(&hdev->pdev->dev,
3751 "re-schedule reset task(%u)\n",
3752 hdev->rst_stats.reset_fail_cnt);
3756 hclge_clear_reset_cause(hdev);
3758 /* recover the handshake status when reset fail */
3759 hclge_reset_handshake(hdev, true);
3761 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3763 hclge_dbg_dump_rst_info(hdev);
3765 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3770 static int hclge_set_rst_done(struct hclge_dev *hdev)
3772 struct hclge_pf_rst_done_cmd *req;
3773 struct hclge_desc desc;
3776 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3777 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3778 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3781 /* To be compatible with the old firmware, which does not support
3782 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3785 if (ret == -EOPNOTSUPP) {
3786 dev_warn(&hdev->pdev->dev,
3787 "current firmware does not support command(0x%x)!\n",
3788 HCLGE_OPC_PF_RST_DONE);
3791 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3798 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3802 switch (hdev->reset_type) {
3803 case HNAE3_FUNC_RESET:
3804 case HNAE3_FLR_RESET:
3805 ret = hclge_set_all_vf_rst(hdev, false);
3807 case HNAE3_GLOBAL_RESET:
3808 case HNAE3_IMP_RESET:
3809 ret = hclge_set_rst_done(hdev);
3815 /* clear up the handshake status after re-initialize done */
3816 hclge_reset_handshake(hdev, false);
3821 static int hclge_reset_stack(struct hclge_dev *hdev)
3825 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3829 ret = hclge_reset_ae_dev(hdev->ae_dev);
3833 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3836 static int hclge_reset_prepare(struct hclge_dev *hdev)
3840 hdev->rst_stats.reset_cnt++;
3841 /* perform reset of the stack & ae device for a client */
3842 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3847 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3852 return hclge_reset_prepare_wait(hdev);
3855 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3857 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3858 enum hnae3_reset_type reset_level;
3861 hdev->rst_stats.hw_reset_done_cnt++;
3863 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3868 ret = hclge_reset_stack(hdev);
3873 hclge_clear_reset_cause(hdev);
3875 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3876 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3880 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3883 ret = hclge_reset_prepare_up(hdev);
3888 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3893 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3897 hdev->last_reset_time = jiffies;
3898 hdev->rst_stats.reset_fail_cnt = 0;
3899 hdev->rst_stats.reset_done_cnt++;
3900 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3902 /* if default_reset_request has a higher level reset request,
3903 * it should be handled as soon as possible. since some errors
3904 * need this kind of reset to fix.
3906 reset_level = hclge_get_reset_level(ae_dev,
3907 &hdev->default_reset_request);
3908 if (reset_level != HNAE3_NONE_RESET)
3909 set_bit(reset_level, &hdev->reset_request);
3914 static void hclge_reset(struct hclge_dev *hdev)
3916 if (hclge_reset_prepare(hdev))
3919 if (hclge_reset_wait(hdev))
3922 if (hclge_reset_rebuild(hdev))
3928 if (hclge_reset_err_handle(hdev))
3929 hclge_reset_task_schedule(hdev);
3932 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3934 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3935 struct hclge_dev *hdev = ae_dev->priv;
3937 /* We might end up getting called broadly because of 2 below cases:
3938 * 1. Recoverable error was conveyed through APEI and only way to bring
3939 * normalcy is to reset.
3940 * 2. A new reset request from the stack due to timeout
3942 * For the first case,error event might not have ae handle available.
3943 * check if this is a new reset request and we are not here just because
3944 * last reset attempt did not succeed and watchdog hit us again. We will
3945 * know this if last reset request did not occur very recently (watchdog
3946 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3947 * In case of new request we reset the "reset level" to PF reset.
3948 * And if it is a repeat reset request of the most recent one then we
3949 * want to make sure we throttle the reset request. Therefore, we will
3950 * not allow it again before 3*HZ times.
3953 handle = &hdev->vport[0].nic;
3955 if (time_before(jiffies, (hdev->last_reset_time +
3956 HCLGE_RESET_INTERVAL))) {
3957 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3959 } else if (hdev->default_reset_request) {
3961 hclge_get_reset_level(ae_dev,
3962 &hdev->default_reset_request);
3963 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3964 hdev->reset_level = HNAE3_FUNC_RESET;
3967 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3970 /* request reset & schedule reset task */
3971 set_bit(hdev->reset_level, &hdev->reset_request);
3972 hclge_reset_task_schedule(hdev);
3974 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3975 hdev->reset_level++;
3978 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3979 enum hnae3_reset_type rst_type)
3981 struct hclge_dev *hdev = ae_dev->priv;
3983 set_bit(rst_type, &hdev->default_reset_request);
3986 static void hclge_reset_timer(struct timer_list *t)
3988 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3990 /* if default_reset_request has no value, it means that this reset
3991 * request has already be handled, so just return here
3993 if (!hdev->default_reset_request)
3996 dev_info(&hdev->pdev->dev,
3997 "triggering reset in reset timer\n");
3998 hclge_reset_event(hdev->pdev, NULL);
4001 static void hclge_reset_subtask(struct hclge_dev *hdev)
4003 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4005 /* check if there is any ongoing reset in the hardware. This status can
4006 * be checked from reset_pending. If there is then, we need to wait for
4007 * hardware to complete reset.
4008 * a. If we are able to figure out in reasonable time that hardware
4009 * has fully resetted then, we can proceed with driver, client
4011 * b. else, we can come back later to check this status so re-sched
4014 hdev->last_reset_time = jiffies;
4015 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4016 if (hdev->reset_type != HNAE3_NONE_RESET)
4019 /* check if we got any *new* reset requests to be honored */
4020 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4021 if (hdev->reset_type != HNAE3_NONE_RESET)
4022 hclge_do_reset(hdev);
4024 hdev->reset_type = HNAE3_NONE_RESET;
4027 static void hclge_reset_service_task(struct hclge_dev *hdev)
4029 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4032 down(&hdev->reset_sem);
4033 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4035 hclge_reset_subtask(hdev);
4037 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4038 up(&hdev->reset_sem);
4041 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4045 /* start from vport 1 for PF is always alive */
4046 for (i = 1; i < hdev->num_alloc_vport; i++) {
4047 struct hclge_vport *vport = &hdev->vport[i];
4049 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4050 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4052 /* If vf is not alive, set to default value */
4053 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4054 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4058 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4060 unsigned long delta = round_jiffies_relative(HZ);
4062 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4065 /* Always handle the link updating to make sure link state is
4066 * updated when it is triggered by mbx.
4068 hclge_update_link_status(hdev);
4069 hclge_sync_mac_table(hdev);
4070 hclge_sync_promisc_mode(hdev);
4072 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4073 delta = jiffies - hdev->last_serv_processed;
4075 if (delta < round_jiffies_relative(HZ)) {
4076 delta = round_jiffies_relative(HZ) - delta;
4081 hdev->serv_processed_cnt++;
4082 hclge_update_vport_alive(hdev);
4084 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4085 hdev->last_serv_processed = jiffies;
4089 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4090 hclge_update_stats_for_all(hdev);
4092 hclge_update_port_info(hdev);
4093 hclge_sync_vlan_filter(hdev);
4095 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4096 hclge_rfs_filter_expire(hdev);
4098 hdev->last_serv_processed = jiffies;
4101 hclge_task_schedule(hdev, delta);
4104 static void hclge_service_task(struct work_struct *work)
4106 struct hclge_dev *hdev =
4107 container_of(work, struct hclge_dev, service_task.work);
4109 hclge_reset_service_task(hdev);
4110 hclge_mailbox_service_task(hdev);
4111 hclge_periodic_service_task(hdev);
4113 /* Handle reset and mbx again in case periodical task delays the
4114 * handling by calling hclge_task_schedule() in
4115 * hclge_periodic_service_task().
4117 hclge_reset_service_task(hdev);
4118 hclge_mailbox_service_task(hdev);
4121 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4123 /* VF handle has no client */
4124 if (!handle->client)
4125 return container_of(handle, struct hclge_vport, nic);
4126 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4127 return container_of(handle, struct hclge_vport, roce);
4129 return container_of(handle, struct hclge_vport, nic);
4132 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4133 struct hnae3_vector_info *vector_info)
4135 struct hclge_vport *vport = hclge_get_vport(handle);
4136 struct hnae3_vector_info *vector = vector_info;
4137 struct hclge_dev *hdev = vport->back;
4141 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4142 vector_num = min(hdev->num_msi_left, vector_num);
4144 for (j = 0; j < vector_num; j++) {
4145 for (i = 1; i < hdev->num_msi; i++) {
4146 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4147 vector->vector = pci_irq_vector(hdev->pdev, i);
4148 vector->io_addr = hdev->hw.io_base +
4149 HCLGE_VECTOR_REG_BASE +
4150 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4152 HCLGE_VECTOR_VF_OFFSET;
4153 hdev->vector_status[i] = vport->vport_id;
4154 hdev->vector_irq[i] = vector->vector;
4163 hdev->num_msi_left -= alloc;
4164 hdev->num_msi_used += alloc;
4169 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4173 for (i = 0; i < hdev->num_msi; i++)
4174 if (vector == hdev->vector_irq[i])
4180 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4182 struct hclge_vport *vport = hclge_get_vport(handle);
4183 struct hclge_dev *hdev = vport->back;
4186 vector_id = hclge_get_vector_index(hdev, vector);
4187 if (vector_id < 0) {
4188 dev_err(&hdev->pdev->dev,
4189 "Get vector index fail. vector = %d\n", vector);
4193 hclge_free_vector(hdev, vector_id);
4198 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4200 return HCLGE_RSS_KEY_SIZE;
4203 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4205 return HCLGE_RSS_IND_TBL_SIZE;
4208 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4209 const u8 hfunc, const u8 *key)
4211 struct hclge_rss_config_cmd *req;
4212 unsigned int key_offset = 0;
4213 struct hclge_desc desc;
4218 key_counts = HCLGE_RSS_KEY_SIZE;
4219 req = (struct hclge_rss_config_cmd *)desc.data;
4221 while (key_counts) {
4222 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4225 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4226 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4228 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4229 memcpy(req->hash_key,
4230 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4232 key_counts -= key_size;
4234 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4236 dev_err(&hdev->pdev->dev,
4237 "Configure RSS config fail, status = %d\n",
4245 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4247 struct hclge_rss_indirection_table_cmd *req;
4248 struct hclge_desc desc;
4252 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4254 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4255 hclge_cmd_setup_basic_desc
4256 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4258 req->start_table_index =
4259 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4260 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4262 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4263 req->rss_result[j] =
4264 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4266 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4268 dev_err(&hdev->pdev->dev,
4269 "Configure rss indir table fail,status = %d\n",
4277 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4278 u16 *tc_size, u16 *tc_offset)
4280 struct hclge_rss_tc_mode_cmd *req;
4281 struct hclge_desc desc;
4285 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4286 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4288 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4291 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4292 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4293 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4294 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4295 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4297 req->rss_tc_mode[i] = cpu_to_le16(mode);
4300 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4302 dev_err(&hdev->pdev->dev,
4303 "Configure rss tc mode fail, status = %d\n", ret);
4308 static void hclge_get_rss_type(struct hclge_vport *vport)
4310 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4311 vport->rss_tuple_sets.ipv4_udp_en ||
4312 vport->rss_tuple_sets.ipv4_sctp_en ||
4313 vport->rss_tuple_sets.ipv6_tcp_en ||
4314 vport->rss_tuple_sets.ipv6_udp_en ||
4315 vport->rss_tuple_sets.ipv6_sctp_en)
4316 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4317 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4318 vport->rss_tuple_sets.ipv6_fragment_en)
4319 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4321 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4324 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4326 struct hclge_rss_input_tuple_cmd *req;
4327 struct hclge_desc desc;
4330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4332 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4334 /* Get the tuple cfg from pf */
4335 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4336 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4337 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4338 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4339 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4340 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4341 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4342 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4343 hclge_get_rss_type(&hdev->vport[0]);
4344 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4346 dev_err(&hdev->pdev->dev,
4347 "Configure rss input fail, status = %d\n", ret);
4351 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4354 struct hclge_vport *vport = hclge_get_vport(handle);
4357 /* Get hash algorithm */
4359 switch (vport->rss_algo) {
4360 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4361 *hfunc = ETH_RSS_HASH_TOP;
4363 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4364 *hfunc = ETH_RSS_HASH_XOR;
4367 *hfunc = ETH_RSS_HASH_UNKNOWN;
4372 /* Get the RSS Key required by the user */
4374 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4376 /* Get indirect table */
4378 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4379 indir[i] = vport->rss_indirection_tbl[i];
4384 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4385 const u8 *key, const u8 hfunc)
4387 struct hclge_vport *vport = hclge_get_vport(handle);
4388 struct hclge_dev *hdev = vport->back;
4392 /* Set the RSS Hash Key if specififed by the user */
4395 case ETH_RSS_HASH_TOP:
4396 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4398 case ETH_RSS_HASH_XOR:
4399 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4401 case ETH_RSS_HASH_NO_CHANGE:
4402 hash_algo = vport->rss_algo;
4408 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4412 /* Update the shadow RSS key with user specified qids */
4413 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4414 vport->rss_algo = hash_algo;
4417 /* Update the shadow RSS table with user specified qids */
4418 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4419 vport->rss_indirection_tbl[i] = indir[i];
4421 /* Update the hardware */
4422 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4425 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4427 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4429 if (nfc->data & RXH_L4_B_2_3)
4430 hash_sets |= HCLGE_D_PORT_BIT;
4432 hash_sets &= ~HCLGE_D_PORT_BIT;
4434 if (nfc->data & RXH_IP_SRC)
4435 hash_sets |= HCLGE_S_IP_BIT;
4437 hash_sets &= ~HCLGE_S_IP_BIT;
4439 if (nfc->data & RXH_IP_DST)
4440 hash_sets |= HCLGE_D_IP_BIT;
4442 hash_sets &= ~HCLGE_D_IP_BIT;
4444 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4445 hash_sets |= HCLGE_V_TAG_BIT;
4450 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4451 struct ethtool_rxnfc *nfc)
4453 struct hclge_vport *vport = hclge_get_vport(handle);
4454 struct hclge_dev *hdev = vport->back;
4455 struct hclge_rss_input_tuple_cmd *req;
4456 struct hclge_desc desc;
4460 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4461 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4464 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4465 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4467 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4468 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4469 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4470 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4471 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4472 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4473 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4474 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4476 tuple_sets = hclge_get_rss_hash_bits(nfc);
4477 switch (nfc->flow_type) {
4479 req->ipv4_tcp_en = tuple_sets;
4482 req->ipv6_tcp_en = tuple_sets;
4485 req->ipv4_udp_en = tuple_sets;
4488 req->ipv6_udp_en = tuple_sets;
4491 req->ipv4_sctp_en = tuple_sets;
4494 if ((nfc->data & RXH_L4_B_0_1) ||
4495 (nfc->data & RXH_L4_B_2_3))
4498 req->ipv6_sctp_en = tuple_sets;
4501 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4504 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4510 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4512 dev_err(&hdev->pdev->dev,
4513 "Set rss tuple fail, status = %d\n", ret);
4517 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4518 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4519 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4520 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4521 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4522 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4523 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4524 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4525 hclge_get_rss_type(vport);
4529 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4530 struct ethtool_rxnfc *nfc)
4532 struct hclge_vport *vport = hclge_get_vport(handle);
4537 switch (nfc->flow_type) {
4539 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4542 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4545 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4548 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4551 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4554 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4558 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4567 if (tuple_sets & HCLGE_D_PORT_BIT)
4568 nfc->data |= RXH_L4_B_2_3;
4569 if (tuple_sets & HCLGE_S_PORT_BIT)
4570 nfc->data |= RXH_L4_B_0_1;
4571 if (tuple_sets & HCLGE_D_IP_BIT)
4572 nfc->data |= RXH_IP_DST;
4573 if (tuple_sets & HCLGE_S_IP_BIT)
4574 nfc->data |= RXH_IP_SRC;
4579 static int hclge_get_tc_size(struct hnae3_handle *handle)
4581 struct hclge_vport *vport = hclge_get_vport(handle);
4582 struct hclge_dev *hdev = vport->back;
4584 return hdev->rss_size_max;
4587 int hclge_rss_init_hw(struct hclge_dev *hdev)
4589 struct hclge_vport *vport = hdev->vport;
4590 u8 *rss_indir = vport[0].rss_indirection_tbl;
4591 u16 rss_size = vport[0].alloc_rss_size;
4592 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4593 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4594 u8 *key = vport[0].rss_hash_key;
4595 u8 hfunc = vport[0].rss_algo;
4596 u16 tc_valid[HCLGE_MAX_TC_NUM];
4601 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4605 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4609 ret = hclge_set_rss_input_tuple(hdev);
4613 /* Each TC have the same queue size, and tc_size set to hardware is
4614 * the log2 of roundup power of two of rss_size, the acutal queue
4615 * size is limited by indirection table.
4617 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4618 dev_err(&hdev->pdev->dev,
4619 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4624 roundup_size = roundup_pow_of_two(rss_size);
4625 roundup_size = ilog2(roundup_size);
4627 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4630 if (!(hdev->hw_tc_map & BIT(i)))
4634 tc_size[i] = roundup_size;
4635 tc_offset[i] = rss_size * i;
4638 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4641 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4643 struct hclge_vport *vport = hdev->vport;
4646 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4647 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4648 vport[j].rss_indirection_tbl[i] =
4649 i % vport[j].alloc_rss_size;
4653 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4655 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4656 struct hclge_vport *vport = hdev->vport;
4658 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4659 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4661 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4662 vport[i].rss_tuple_sets.ipv4_tcp_en =
4663 HCLGE_RSS_INPUT_TUPLE_OTHER;
4664 vport[i].rss_tuple_sets.ipv4_udp_en =
4665 HCLGE_RSS_INPUT_TUPLE_OTHER;
4666 vport[i].rss_tuple_sets.ipv4_sctp_en =
4667 HCLGE_RSS_INPUT_TUPLE_SCTP;
4668 vport[i].rss_tuple_sets.ipv4_fragment_en =
4669 HCLGE_RSS_INPUT_TUPLE_OTHER;
4670 vport[i].rss_tuple_sets.ipv6_tcp_en =
4671 HCLGE_RSS_INPUT_TUPLE_OTHER;
4672 vport[i].rss_tuple_sets.ipv6_udp_en =
4673 HCLGE_RSS_INPUT_TUPLE_OTHER;
4674 vport[i].rss_tuple_sets.ipv6_sctp_en =
4675 HCLGE_RSS_INPUT_TUPLE_SCTP;
4676 vport[i].rss_tuple_sets.ipv6_fragment_en =
4677 HCLGE_RSS_INPUT_TUPLE_OTHER;
4679 vport[i].rss_algo = rss_algo;
4681 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4682 HCLGE_RSS_KEY_SIZE);
4685 hclge_rss_indir_init_cfg(hdev);
4688 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4689 int vector_id, bool en,
4690 struct hnae3_ring_chain_node *ring_chain)
4692 struct hclge_dev *hdev = vport->back;
4693 struct hnae3_ring_chain_node *node;
4694 struct hclge_desc desc;
4695 struct hclge_ctrl_vector_chain_cmd *req =
4696 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4697 enum hclge_cmd_status status;
4698 enum hclge_opcode_type op;
4699 u16 tqp_type_and_id;
4702 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4703 hclge_cmd_setup_basic_desc(&desc, op, false);
4704 req->int_vector_id = vector_id;
4707 for (node = ring_chain; node; node = node->next) {
4708 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4709 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4711 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4712 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4713 HCLGE_TQP_ID_S, node->tqp_index);
4714 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4716 hnae3_get_field(node->int_gl_idx,
4717 HNAE3_RING_GL_IDX_M,
4718 HNAE3_RING_GL_IDX_S));
4719 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4720 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4721 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4722 req->vfid = vport->vport_id;
4724 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4726 dev_err(&hdev->pdev->dev,
4727 "Map TQP fail, status is %d.\n",
4733 hclge_cmd_setup_basic_desc(&desc,
4736 req->int_vector_id = vector_id;
4741 req->int_cause_num = i;
4742 req->vfid = vport->vport_id;
4743 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4745 dev_err(&hdev->pdev->dev,
4746 "Map TQP fail, status is %d.\n", status);
4754 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4755 struct hnae3_ring_chain_node *ring_chain)
4757 struct hclge_vport *vport = hclge_get_vport(handle);
4758 struct hclge_dev *hdev = vport->back;
4761 vector_id = hclge_get_vector_index(hdev, vector);
4762 if (vector_id < 0) {
4763 dev_err(&hdev->pdev->dev,
4764 "failed to get vector index. vector=%d\n", vector);
4768 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4771 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4772 struct hnae3_ring_chain_node *ring_chain)
4774 struct hclge_vport *vport = hclge_get_vport(handle);
4775 struct hclge_dev *hdev = vport->back;
4778 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4781 vector_id = hclge_get_vector_index(hdev, vector);
4782 if (vector_id < 0) {
4783 dev_err(&handle->pdev->dev,
4784 "Get vector index fail. ret =%d\n", vector_id);
4788 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4790 dev_err(&handle->pdev->dev,
4791 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4797 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4798 struct hclge_promisc_param *param)
4800 struct hclge_promisc_cfg_cmd *req;
4801 struct hclge_desc desc;
4804 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4806 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4807 req->vf_id = param->vf_id;
4809 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4810 * pdev revision(0x20), new revision support them. The
4811 * value of this two fields will not return error when driver
4812 * send command to fireware in revision(0x20).
4814 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4815 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4817 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4819 dev_err(&hdev->pdev->dev,
4820 "failed to set vport %d promisc mode, ret = %d.\n",
4826 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4827 bool en_uc, bool en_mc, bool en_bc,
4833 memset(param, 0, sizeof(struct hclge_promisc_param));
4835 param->enable = HCLGE_PROMISC_EN_UC;
4837 param->enable |= HCLGE_PROMISC_EN_MC;
4839 param->enable |= HCLGE_PROMISC_EN_BC;
4840 param->vf_id = vport_id;
4843 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4844 bool en_mc_pmc, bool en_bc_pmc)
4846 struct hclge_dev *hdev = vport->back;
4847 struct hclge_promisc_param param;
4849 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4851 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4854 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4857 struct hclge_vport *vport = hclge_get_vport(handle);
4858 struct hclge_dev *hdev = vport->back;
4859 bool en_bc_pmc = true;
4861 /* For device whose version below V2, if broadcast promisc enabled,
4862 * vlan filter is always bypassed. So broadcast promisc should be
4863 * disabled until user enable promisc mode
4865 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4866 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4868 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4872 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4874 struct hclge_vport *vport = hclge_get_vport(handle);
4875 struct hclge_dev *hdev = vport->back;
4877 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4880 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4882 struct hclge_get_fd_mode_cmd *req;
4883 struct hclge_desc desc;
4886 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4888 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4890 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4892 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4896 *fd_mode = req->mode;
4901 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4902 u32 *stage1_entry_num,
4903 u32 *stage2_entry_num,
4904 u16 *stage1_counter_num,
4905 u16 *stage2_counter_num)
4907 struct hclge_get_fd_allocation_cmd *req;
4908 struct hclge_desc desc;
4911 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4913 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4915 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4917 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4922 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4923 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4924 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4925 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4930 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4931 enum HCLGE_FD_STAGE stage_num)
4933 struct hclge_set_fd_key_config_cmd *req;
4934 struct hclge_fd_key_cfg *stage;
4935 struct hclge_desc desc;
4938 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4940 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4941 stage = &hdev->fd_cfg.key_cfg[stage_num];
4942 req->stage = stage_num;
4943 req->key_select = stage->key_sel;
4944 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4945 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4946 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4947 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4948 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4949 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4951 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4953 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4958 static int hclge_init_fd_config(struct hclge_dev *hdev)
4960 #define LOW_2_WORDS 0x03
4961 struct hclge_fd_key_cfg *key_cfg;
4964 if (!hnae3_dev_fd_supported(hdev))
4967 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4971 switch (hdev->fd_cfg.fd_mode) {
4972 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4973 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4975 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4976 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4979 dev_err(&hdev->pdev->dev,
4980 "Unsupported flow director mode %u\n",
4981 hdev->fd_cfg.fd_mode);
4985 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4986 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4987 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4988 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4989 key_cfg->outer_sipv6_word_en = 0;
4990 key_cfg->outer_dipv6_word_en = 0;
4992 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4993 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4994 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4995 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4997 /* If use max 400bit key, we can support tuples for ether type */
4998 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4999 key_cfg->tuple_active |=
5000 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5002 /* roce_type is used to filter roce frames
5003 * dst_vport is used to specify the rule
5005 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5007 ret = hclge_get_fd_allocation(hdev,
5008 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5009 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5010 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5011 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5015 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5018 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5019 int loc, u8 *key, bool is_add)
5021 struct hclge_fd_tcam_config_1_cmd *req1;
5022 struct hclge_fd_tcam_config_2_cmd *req2;
5023 struct hclge_fd_tcam_config_3_cmd *req3;
5024 struct hclge_desc desc[3];
5027 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5028 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5029 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5030 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5031 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5033 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5034 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5035 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5037 req1->stage = stage;
5038 req1->xy_sel = sel_x ? 1 : 0;
5039 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5040 req1->index = cpu_to_le32(loc);
5041 req1->entry_vld = sel_x ? is_add : 0;
5044 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5045 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5046 sizeof(req2->tcam_data));
5047 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5048 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5051 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5053 dev_err(&hdev->pdev->dev,
5054 "config tcam key fail, ret=%d\n",
5060 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5061 struct hclge_fd_ad_data *action)
5063 struct hclge_fd_ad_config_cmd *req;
5064 struct hclge_desc desc;
5068 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5070 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5071 req->index = cpu_to_le32(loc);
5074 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5075 action->write_rule_id_to_bd);
5076 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5079 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5080 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5081 action->forward_to_direct_queue);
5082 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5084 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5085 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5086 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5087 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5088 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5089 action->counter_id);
5091 req->ad_data = cpu_to_le64(ad_data);
5092 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5094 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5099 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5100 struct hclge_fd_rule *rule)
5102 u16 tmp_x_s, tmp_y_s;
5103 u32 tmp_x_l, tmp_y_l;
5106 if (rule->unused_tuple & tuple_bit)
5109 switch (tuple_bit) {
5110 case BIT(INNER_DST_MAC):
5111 for (i = 0; i < ETH_ALEN; i++) {
5112 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5113 rule->tuples_mask.dst_mac[i]);
5114 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5115 rule->tuples_mask.dst_mac[i]);
5119 case BIT(INNER_SRC_MAC):
5120 for (i = 0; i < ETH_ALEN; i++) {
5121 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5122 rule->tuples.src_mac[i]);
5123 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5124 rule->tuples.src_mac[i]);
5128 case BIT(INNER_VLAN_TAG_FST):
5129 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5130 rule->tuples_mask.vlan_tag1);
5131 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5132 rule->tuples_mask.vlan_tag1);
5133 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5134 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5137 case BIT(INNER_ETH_TYPE):
5138 calc_x(tmp_x_s, rule->tuples.ether_proto,
5139 rule->tuples_mask.ether_proto);
5140 calc_y(tmp_y_s, rule->tuples.ether_proto,
5141 rule->tuples_mask.ether_proto);
5142 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5143 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5146 case BIT(INNER_IP_TOS):
5147 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5148 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5151 case BIT(INNER_IP_PROTO):
5152 calc_x(*key_x, rule->tuples.ip_proto,
5153 rule->tuples_mask.ip_proto);
5154 calc_y(*key_y, rule->tuples.ip_proto,
5155 rule->tuples_mask.ip_proto);
5158 case BIT(INNER_SRC_IP):
5159 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5160 rule->tuples_mask.src_ip[IPV4_INDEX]);
5161 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5162 rule->tuples_mask.src_ip[IPV4_INDEX]);
5163 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5164 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5167 case BIT(INNER_DST_IP):
5168 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5169 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5170 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5171 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5172 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5173 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5176 case BIT(INNER_SRC_PORT):
5177 calc_x(tmp_x_s, rule->tuples.src_port,
5178 rule->tuples_mask.src_port);
5179 calc_y(tmp_y_s, rule->tuples.src_port,
5180 rule->tuples_mask.src_port);
5181 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5182 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5185 case BIT(INNER_DST_PORT):
5186 calc_x(tmp_x_s, rule->tuples.dst_port,
5187 rule->tuples_mask.dst_port);
5188 calc_y(tmp_y_s, rule->tuples.dst_port,
5189 rule->tuples_mask.dst_port);
5190 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5191 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5199 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5200 u8 vf_id, u8 network_port_id)
5202 u32 port_number = 0;
5204 if (port_type == HOST_PORT) {
5205 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5207 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5209 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5211 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5212 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5213 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5219 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5220 __le32 *key_x, __le32 *key_y,
5221 struct hclge_fd_rule *rule)
5223 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5224 u8 cur_pos = 0, tuple_size, shift_bits;
5227 for (i = 0; i < MAX_META_DATA; i++) {
5228 tuple_size = meta_data_key_info[i].key_length;
5229 tuple_bit = key_cfg->meta_data_active & BIT(i);
5231 switch (tuple_bit) {
5232 case BIT(ROCE_TYPE):
5233 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5234 cur_pos += tuple_size;
5236 case BIT(DST_VPORT):
5237 port_number = hclge_get_port_number(HOST_PORT, 0,
5239 hnae3_set_field(meta_data,
5240 GENMASK(cur_pos + tuple_size, cur_pos),
5241 cur_pos, port_number);
5242 cur_pos += tuple_size;
5249 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5250 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5251 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5253 *key_x = cpu_to_le32(tmp_x << shift_bits);
5254 *key_y = cpu_to_le32(tmp_y << shift_bits);
5257 /* A complete key is combined with meta data key and tuple key.
5258 * Meta data key is stored at the MSB region, and tuple key is stored at
5259 * the LSB region, unused bits will be filled 0.
5261 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5262 struct hclge_fd_rule *rule)
5264 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5265 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5266 u8 *cur_key_x, *cur_key_y;
5267 u8 meta_data_region;
5272 memset(key_x, 0, sizeof(key_x));
5273 memset(key_y, 0, sizeof(key_y));
5277 for (i = 0 ; i < MAX_TUPLE; i++) {
5281 tuple_size = tuple_key_info[i].key_length / 8;
5282 check_tuple = key_cfg->tuple_active & BIT(i);
5284 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5287 cur_key_x += tuple_size;
5288 cur_key_y += tuple_size;
5292 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5293 MAX_META_DATA_LENGTH / 8;
5295 hclge_fd_convert_meta_data(key_cfg,
5296 (__le32 *)(key_x + meta_data_region),
5297 (__le32 *)(key_y + meta_data_region),
5300 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5303 dev_err(&hdev->pdev->dev,
5304 "fd key_y config fail, loc=%u, ret=%d\n",
5305 rule->queue_id, ret);
5309 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5312 dev_err(&hdev->pdev->dev,
5313 "fd key_x config fail, loc=%u, ret=%d\n",
5314 rule->queue_id, ret);
5318 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5319 struct hclge_fd_rule *rule)
5321 struct hclge_fd_ad_data ad_data;
5323 ad_data.ad_id = rule->location;
5325 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5326 ad_data.drop_packet = true;
5327 ad_data.forward_to_direct_queue = false;
5328 ad_data.queue_id = 0;
5330 ad_data.drop_packet = false;
5331 ad_data.forward_to_direct_queue = true;
5332 ad_data.queue_id = rule->queue_id;
5335 ad_data.use_counter = false;
5336 ad_data.counter_id = 0;
5338 ad_data.use_next_stage = false;
5339 ad_data.next_input_key = 0;
5341 ad_data.write_rule_id_to_bd = true;
5342 ad_data.rule_id = rule->location;
5344 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5347 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5350 if (!spec || !unused_tuple)
5353 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5356 *unused_tuple |= BIT(INNER_SRC_IP);
5359 *unused_tuple |= BIT(INNER_DST_IP);
5362 *unused_tuple |= BIT(INNER_SRC_PORT);
5365 *unused_tuple |= BIT(INNER_DST_PORT);
5368 *unused_tuple |= BIT(INNER_IP_TOS);
5373 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5376 if (!spec || !unused_tuple)
5379 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5380 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5383 *unused_tuple |= BIT(INNER_SRC_IP);
5386 *unused_tuple |= BIT(INNER_DST_IP);
5389 *unused_tuple |= BIT(INNER_IP_TOS);
5392 *unused_tuple |= BIT(INNER_IP_PROTO);
5394 if (spec->l4_4_bytes)
5397 if (spec->ip_ver != ETH_RX_NFC_IP4)
5403 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5406 if (!spec || !unused_tuple)
5409 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5412 /* check whether src/dst ip address used */
5413 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5414 !spec->ip6src[2] && !spec->ip6src[3])
5415 *unused_tuple |= BIT(INNER_SRC_IP);
5417 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5418 !spec->ip6dst[2] && !spec->ip6dst[3])
5419 *unused_tuple |= BIT(INNER_DST_IP);
5422 *unused_tuple |= BIT(INNER_SRC_PORT);
5425 *unused_tuple |= BIT(INNER_DST_PORT);
5433 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5436 if (!spec || !unused_tuple)
5439 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5440 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5442 /* check whether src/dst ip address used */
5443 if (!spec->ip6src[0] && !spec->ip6src[1] &&
5444 !spec->ip6src[2] && !spec->ip6src[3])
5445 *unused_tuple |= BIT(INNER_SRC_IP);
5447 if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5448 !spec->ip6dst[2] && !spec->ip6dst[3])
5449 *unused_tuple |= BIT(INNER_DST_IP);
5451 if (!spec->l4_proto)
5452 *unused_tuple |= BIT(INNER_IP_PROTO);
5457 if (spec->l4_4_bytes)
5463 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5465 if (!spec || !unused_tuple)
5468 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5469 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5470 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5472 if (is_zero_ether_addr(spec->h_source))
5473 *unused_tuple |= BIT(INNER_SRC_MAC);
5475 if (is_zero_ether_addr(spec->h_dest))
5476 *unused_tuple |= BIT(INNER_DST_MAC);
5479 *unused_tuple |= BIT(INNER_ETH_TYPE);
5484 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5485 struct ethtool_rx_flow_spec *fs,
5488 if (fs->flow_type & FLOW_EXT) {
5489 if (fs->h_ext.vlan_etype) {
5490 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5494 if (!fs->h_ext.vlan_tci)
5495 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5497 if (fs->m_ext.vlan_tci &&
5498 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5499 dev_err(&hdev->pdev->dev,
5500 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5501 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5505 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5508 if (fs->flow_type & FLOW_MAC_EXT) {
5509 if (hdev->fd_cfg.fd_mode !=
5510 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5511 dev_err(&hdev->pdev->dev,
5512 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5516 if (is_zero_ether_addr(fs->h_ext.h_dest))
5517 *unused_tuple |= BIT(INNER_DST_MAC);
5519 *unused_tuple &= ~BIT(INNER_DST_MAC);
5525 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5526 struct ethtool_rx_flow_spec *fs,
5532 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5533 dev_err(&hdev->pdev->dev,
5534 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5536 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5540 if ((fs->flow_type & FLOW_EXT) &&
5541 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5542 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5546 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5547 switch (flow_type) {
5551 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5555 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5561 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5564 case IPV6_USER_FLOW:
5565 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5569 if (hdev->fd_cfg.fd_mode !=
5570 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5571 dev_err(&hdev->pdev->dev,
5572 "ETHER_FLOW is not supported in current fd mode!\n");
5576 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5580 dev_err(&hdev->pdev->dev,
5581 "unsupported protocol type, protocol type = %#x\n",
5587 dev_err(&hdev->pdev->dev,
5588 "failed to check flow union tuple, ret = %d\n",
5593 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5596 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5598 struct hclge_fd_rule *rule = NULL;
5599 struct hlist_node *node2;
5601 spin_lock_bh(&hdev->fd_rule_lock);
5602 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5603 if (rule->location >= location)
5607 spin_unlock_bh(&hdev->fd_rule_lock);
5609 return rule && rule->location == location;
5612 /* make sure being called after lock up with fd_rule_lock */
5613 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5614 struct hclge_fd_rule *new_rule,
5618 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5619 struct hlist_node *node2;
5621 if (is_add && !new_rule)
5624 hlist_for_each_entry_safe(rule, node2,
5625 &hdev->fd_rule_list, rule_node) {
5626 if (rule->location >= location)
5631 if (rule && rule->location == location) {
5632 hlist_del(&rule->rule_node);
5634 hdev->hclge_fd_rule_num--;
5637 if (!hdev->hclge_fd_rule_num)
5638 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5639 clear_bit(location, hdev->fd_bmap);
5643 } else if (!is_add) {
5644 dev_err(&hdev->pdev->dev,
5645 "delete fail, rule %u is inexistent\n",
5650 INIT_HLIST_NODE(&new_rule->rule_node);
5653 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5655 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5657 set_bit(location, hdev->fd_bmap);
5658 hdev->hclge_fd_rule_num++;
5659 hdev->fd_active_type = new_rule->rule_type;
5664 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5665 struct ethtool_rx_flow_spec *fs,
5666 struct hclge_fd_rule *rule)
5668 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5670 switch (flow_type) {
5674 rule->tuples.src_ip[IPV4_INDEX] =
5675 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5676 rule->tuples_mask.src_ip[IPV4_INDEX] =
5677 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5679 rule->tuples.dst_ip[IPV4_INDEX] =
5680 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5681 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5682 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5684 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5685 rule->tuples_mask.src_port =
5686 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5688 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5689 rule->tuples_mask.dst_port =
5690 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5692 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5693 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5695 rule->tuples.ether_proto = ETH_P_IP;
5696 rule->tuples_mask.ether_proto = 0xFFFF;
5700 rule->tuples.src_ip[IPV4_INDEX] =
5701 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5702 rule->tuples_mask.src_ip[IPV4_INDEX] =
5703 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5705 rule->tuples.dst_ip[IPV4_INDEX] =
5706 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5707 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5708 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5710 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5711 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5713 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5714 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5716 rule->tuples.ether_proto = ETH_P_IP;
5717 rule->tuples_mask.ether_proto = 0xFFFF;
5723 be32_to_cpu_array(rule->tuples.src_ip,
5724 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5725 be32_to_cpu_array(rule->tuples_mask.src_ip,
5726 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5728 be32_to_cpu_array(rule->tuples.dst_ip,
5729 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5730 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5731 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5733 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5734 rule->tuples_mask.src_port =
5735 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5737 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5738 rule->tuples_mask.dst_port =
5739 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5741 rule->tuples.ether_proto = ETH_P_IPV6;
5742 rule->tuples_mask.ether_proto = 0xFFFF;
5745 case IPV6_USER_FLOW:
5746 be32_to_cpu_array(rule->tuples.src_ip,
5747 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5748 be32_to_cpu_array(rule->tuples_mask.src_ip,
5749 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5751 be32_to_cpu_array(rule->tuples.dst_ip,
5752 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5753 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5754 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5756 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5757 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5759 rule->tuples.ether_proto = ETH_P_IPV6;
5760 rule->tuples_mask.ether_proto = 0xFFFF;
5764 ether_addr_copy(rule->tuples.src_mac,
5765 fs->h_u.ether_spec.h_source);
5766 ether_addr_copy(rule->tuples_mask.src_mac,
5767 fs->m_u.ether_spec.h_source);
5769 ether_addr_copy(rule->tuples.dst_mac,
5770 fs->h_u.ether_spec.h_dest);
5771 ether_addr_copy(rule->tuples_mask.dst_mac,
5772 fs->m_u.ether_spec.h_dest);
5774 rule->tuples.ether_proto =
5775 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5776 rule->tuples_mask.ether_proto =
5777 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5784 switch (flow_type) {
5787 rule->tuples.ip_proto = IPPROTO_SCTP;
5788 rule->tuples_mask.ip_proto = 0xFF;
5792 rule->tuples.ip_proto = IPPROTO_TCP;
5793 rule->tuples_mask.ip_proto = 0xFF;
5797 rule->tuples.ip_proto = IPPROTO_UDP;
5798 rule->tuples_mask.ip_proto = 0xFF;
5804 if (fs->flow_type & FLOW_EXT) {
5805 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5806 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5809 if (fs->flow_type & FLOW_MAC_EXT) {
5810 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5811 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5817 /* make sure being called after lock up with fd_rule_lock */
5818 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5819 struct hclge_fd_rule *rule)
5824 dev_err(&hdev->pdev->dev,
5825 "The flow director rule is NULL\n");
5829 /* it will never fail here, so needn't to check return value */
5830 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5832 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5836 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5843 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5847 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5848 struct ethtool_rxnfc *cmd)
5850 struct hclge_vport *vport = hclge_get_vport(handle);
5851 struct hclge_dev *hdev = vport->back;
5852 u16 dst_vport_id = 0, q_index = 0;
5853 struct ethtool_rx_flow_spec *fs;
5854 struct hclge_fd_rule *rule;
5859 if (!hnae3_dev_fd_supported(hdev)) {
5860 dev_err(&hdev->pdev->dev,
5861 "flow table director is not supported\n");
5866 dev_err(&hdev->pdev->dev,
5867 "please enable flow director first\n");
5871 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5873 ret = hclge_fd_check_spec(hdev, fs, &unused);
5877 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5878 action = HCLGE_FD_ACTION_DROP_PACKET;
5880 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5881 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5884 if (vf > hdev->num_req_vfs) {
5885 dev_err(&hdev->pdev->dev,
5886 "Error: vf id (%u) > max vf num (%u)\n",
5887 vf, hdev->num_req_vfs);
5891 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5892 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5895 dev_err(&hdev->pdev->dev,
5896 "Error: queue id (%u) > max tqp num (%u)\n",
5901 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5905 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5909 ret = hclge_fd_get_tuple(hdev, fs, rule);
5915 rule->flow_type = fs->flow_type;
5916 rule->location = fs->location;
5917 rule->unused_tuple = unused;
5918 rule->vf_id = dst_vport_id;
5919 rule->queue_id = q_index;
5920 rule->action = action;
5921 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5923 /* to avoid rule conflict, when user configure rule by ethtool,
5924 * we need to clear all arfs rules
5926 spin_lock_bh(&hdev->fd_rule_lock);
5927 hclge_clear_arfs_rules(handle);
5929 ret = hclge_fd_config_rule(hdev, rule);
5931 spin_unlock_bh(&hdev->fd_rule_lock);
5936 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5937 struct ethtool_rxnfc *cmd)
5939 struct hclge_vport *vport = hclge_get_vport(handle);
5940 struct hclge_dev *hdev = vport->back;
5941 struct ethtool_rx_flow_spec *fs;
5944 if (!hnae3_dev_fd_supported(hdev))
5947 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5949 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5952 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5953 dev_err(&hdev->pdev->dev,
5954 "Delete fail, rule %u is inexistent\n", fs->location);
5958 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5963 spin_lock_bh(&hdev->fd_rule_lock);
5964 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5966 spin_unlock_bh(&hdev->fd_rule_lock);
5971 /* make sure being called after lock up with fd_rule_lock */
5972 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5975 struct hclge_vport *vport = hclge_get_vport(handle);
5976 struct hclge_dev *hdev = vport->back;
5977 struct hclge_fd_rule *rule;
5978 struct hlist_node *node;
5981 if (!hnae3_dev_fd_supported(hdev))
5984 for_each_set_bit(location, hdev->fd_bmap,
5985 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5986 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5990 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5992 hlist_del(&rule->rule_node);
5995 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5996 hdev->hclge_fd_rule_num = 0;
5997 bitmap_zero(hdev->fd_bmap,
5998 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6002 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6004 struct hclge_vport *vport = hclge_get_vport(handle);
6005 struct hclge_dev *hdev = vport->back;
6006 struct hclge_fd_rule *rule;
6007 struct hlist_node *node;
6010 /* Return ok here, because reset error handling will check this
6011 * return value. If error is returned here, the reset process will
6014 if (!hnae3_dev_fd_supported(hdev))
6017 /* if fd is disabled, should not restore it when reset */
6021 spin_lock_bh(&hdev->fd_rule_lock);
6022 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6023 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6025 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6028 dev_warn(&hdev->pdev->dev,
6029 "Restore rule %u failed, remove it\n",
6031 clear_bit(rule->location, hdev->fd_bmap);
6032 hlist_del(&rule->rule_node);
6034 hdev->hclge_fd_rule_num--;
6038 if (hdev->hclge_fd_rule_num)
6039 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6041 spin_unlock_bh(&hdev->fd_rule_lock);
6046 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6047 struct ethtool_rxnfc *cmd)
6049 struct hclge_vport *vport = hclge_get_vport(handle);
6050 struct hclge_dev *hdev = vport->back;
6052 if (!hnae3_dev_fd_supported(hdev))
6055 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6056 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6061 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6062 struct ethtool_tcpip4_spec *spec,
6063 struct ethtool_tcpip4_spec *spec_mask)
6065 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6066 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6067 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6069 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6070 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6071 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6073 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6074 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6075 0 : cpu_to_be16(rule->tuples_mask.src_port);
6077 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6078 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6079 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6081 spec->tos = rule->tuples.ip_tos;
6082 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6083 0 : rule->tuples_mask.ip_tos;
6086 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6087 struct ethtool_usrip4_spec *spec,
6088 struct ethtool_usrip4_spec *spec_mask)
6090 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6091 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6092 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6094 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6095 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6096 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6098 spec->tos = rule->tuples.ip_tos;
6099 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6100 0 : rule->tuples_mask.ip_tos;
6102 spec->proto = rule->tuples.ip_proto;
6103 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6104 0 : rule->tuples_mask.ip_proto;
6106 spec->ip_ver = ETH_RX_NFC_IP4;
6109 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6110 struct ethtool_tcpip6_spec *spec,
6111 struct ethtool_tcpip6_spec *spec_mask)
6113 cpu_to_be32_array(spec->ip6src,
6114 rule->tuples.src_ip, IPV6_SIZE);
6115 cpu_to_be32_array(spec->ip6dst,
6116 rule->tuples.dst_ip, IPV6_SIZE);
6117 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6118 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6120 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6123 if (rule->unused_tuple & BIT(INNER_DST_IP))
6124 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6126 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6129 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6130 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6131 0 : cpu_to_be16(rule->tuples_mask.src_port);
6133 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6134 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6135 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6138 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6139 struct ethtool_usrip6_spec *spec,
6140 struct ethtool_usrip6_spec *spec_mask)
6142 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6143 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6144 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6145 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6147 cpu_to_be32_array(spec_mask->ip6src,
6148 rule->tuples_mask.src_ip, IPV6_SIZE);
6150 if (rule->unused_tuple & BIT(INNER_DST_IP))
6151 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6153 cpu_to_be32_array(spec_mask->ip6dst,
6154 rule->tuples_mask.dst_ip, IPV6_SIZE);
6156 spec->l4_proto = rule->tuples.ip_proto;
6157 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6158 0 : rule->tuples_mask.ip_proto;
6161 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6162 struct ethhdr *spec,
6163 struct ethhdr *spec_mask)
6165 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6166 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6168 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6169 eth_zero_addr(spec_mask->h_source);
6171 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6173 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6174 eth_zero_addr(spec_mask->h_dest);
6176 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6178 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6179 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6180 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6183 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6184 struct hclge_fd_rule *rule)
6186 if (fs->flow_type & FLOW_EXT) {
6187 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6188 fs->m_ext.vlan_tci =
6189 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6190 cpu_to_be16(VLAN_VID_MASK) :
6191 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6194 if (fs->flow_type & FLOW_MAC_EXT) {
6195 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6196 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6197 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6199 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6200 rule->tuples_mask.dst_mac);
6204 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6205 struct ethtool_rxnfc *cmd)
6207 struct hclge_vport *vport = hclge_get_vport(handle);
6208 struct hclge_fd_rule *rule = NULL;
6209 struct hclge_dev *hdev = vport->back;
6210 struct ethtool_rx_flow_spec *fs;
6211 struct hlist_node *node2;
6213 if (!hnae3_dev_fd_supported(hdev))
6216 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6218 spin_lock_bh(&hdev->fd_rule_lock);
6220 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6221 if (rule->location >= fs->location)
6225 if (!rule || fs->location != rule->location) {
6226 spin_unlock_bh(&hdev->fd_rule_lock);
6231 fs->flow_type = rule->flow_type;
6232 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6236 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6237 &fs->m_u.tcp_ip4_spec);
6240 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6241 &fs->m_u.usr_ip4_spec);
6246 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6247 &fs->m_u.tcp_ip6_spec);
6249 case IPV6_USER_FLOW:
6250 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6251 &fs->m_u.usr_ip6_spec);
6253 /* The flow type of fd rule has been checked before adding in to rule
6254 * list. As other flow types have been handled, it must be ETHER_FLOW
6255 * for the default case
6258 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6259 &fs->m_u.ether_spec);
6263 hclge_fd_get_ext_info(fs, rule);
6265 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6266 fs->ring_cookie = RX_CLS_FLOW_DISC;
6270 fs->ring_cookie = rule->queue_id;
6271 vf_id = rule->vf_id;
6272 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6273 fs->ring_cookie |= vf_id;
6276 spin_unlock_bh(&hdev->fd_rule_lock);
6281 static int hclge_get_all_rules(struct hnae3_handle *handle,
6282 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6284 struct hclge_vport *vport = hclge_get_vport(handle);
6285 struct hclge_dev *hdev = vport->back;
6286 struct hclge_fd_rule *rule;
6287 struct hlist_node *node2;
6290 if (!hnae3_dev_fd_supported(hdev))
6293 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6295 spin_lock_bh(&hdev->fd_rule_lock);
6296 hlist_for_each_entry_safe(rule, node2,
6297 &hdev->fd_rule_list, rule_node) {
6298 if (cnt == cmd->rule_cnt) {
6299 spin_unlock_bh(&hdev->fd_rule_lock);
6303 rule_locs[cnt] = rule->location;
6307 spin_unlock_bh(&hdev->fd_rule_lock);
6309 cmd->rule_cnt = cnt;
6314 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6315 struct hclge_fd_rule_tuples *tuples)
6317 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6318 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6320 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6321 tuples->ip_proto = fkeys->basic.ip_proto;
6322 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6324 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6325 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6326 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6330 for (i = 0; i < IPV6_SIZE; i++) {
6331 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6332 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6337 /* traverse all rules, check whether an existed rule has the same tuples */
6338 static struct hclge_fd_rule *
6339 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6340 const struct hclge_fd_rule_tuples *tuples)
6342 struct hclge_fd_rule *rule = NULL;
6343 struct hlist_node *node;
6345 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6346 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6353 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6354 struct hclge_fd_rule *rule)
6356 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6357 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6358 BIT(INNER_SRC_PORT);
6361 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6362 if (tuples->ether_proto == ETH_P_IP) {
6363 if (tuples->ip_proto == IPPROTO_TCP)
6364 rule->flow_type = TCP_V4_FLOW;
6366 rule->flow_type = UDP_V4_FLOW;
6368 if (tuples->ip_proto == IPPROTO_TCP)
6369 rule->flow_type = TCP_V6_FLOW;
6371 rule->flow_type = UDP_V6_FLOW;
6373 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6374 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6377 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6378 u16 flow_id, struct flow_keys *fkeys)
6380 struct hclge_vport *vport = hclge_get_vport(handle);
6381 struct hclge_fd_rule_tuples new_tuples = {};
6382 struct hclge_dev *hdev = vport->back;
6383 struct hclge_fd_rule *rule;
6388 if (!hnae3_dev_fd_supported(hdev))
6391 /* when there is already fd rule existed add by user,
6392 * arfs should not work
6394 spin_lock_bh(&hdev->fd_rule_lock);
6395 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6396 spin_unlock_bh(&hdev->fd_rule_lock);
6400 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6402 /* check is there flow director filter existed for this flow,
6403 * if not, create a new filter for it;
6404 * if filter exist with different queue id, modify the filter;
6405 * if filter exist with same queue id, do nothing
6407 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6409 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6410 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6411 spin_unlock_bh(&hdev->fd_rule_lock);
6415 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6417 spin_unlock_bh(&hdev->fd_rule_lock);
6421 set_bit(bit_id, hdev->fd_bmap);
6422 rule->location = bit_id;
6423 rule->flow_id = flow_id;
6424 rule->queue_id = queue_id;
6425 hclge_fd_build_arfs_rule(&new_tuples, rule);
6426 ret = hclge_fd_config_rule(hdev, rule);
6428 spin_unlock_bh(&hdev->fd_rule_lock);
6433 return rule->location;
6436 spin_unlock_bh(&hdev->fd_rule_lock);
6438 if (rule->queue_id == queue_id)
6439 return rule->location;
6441 tmp_queue_id = rule->queue_id;
6442 rule->queue_id = queue_id;
6443 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6445 rule->queue_id = tmp_queue_id;
6449 return rule->location;
6452 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6454 #ifdef CONFIG_RFS_ACCEL
6455 struct hnae3_handle *handle = &hdev->vport[0].nic;
6456 struct hclge_fd_rule *rule;
6457 struct hlist_node *node;
6458 HLIST_HEAD(del_list);
6460 spin_lock_bh(&hdev->fd_rule_lock);
6461 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6462 spin_unlock_bh(&hdev->fd_rule_lock);
6465 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6466 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6467 rule->flow_id, rule->location)) {
6468 hlist_del_init(&rule->rule_node);
6469 hlist_add_head(&rule->rule_node, &del_list);
6470 hdev->hclge_fd_rule_num--;
6471 clear_bit(rule->location, hdev->fd_bmap);
6474 spin_unlock_bh(&hdev->fd_rule_lock);
6476 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6477 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6478 rule->location, NULL, false);
6484 /* make sure being called after lock up with fd_rule_lock */
6485 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6487 #ifdef CONFIG_RFS_ACCEL
6488 struct hclge_vport *vport = hclge_get_vport(handle);
6489 struct hclge_dev *hdev = vport->back;
6491 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6492 hclge_del_all_fd_entries(handle, true);
6496 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6498 struct hclge_vport *vport = hclge_get_vport(handle);
6499 struct hclge_dev *hdev = vport->back;
6501 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6502 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6505 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6507 struct hclge_vport *vport = hclge_get_vport(handle);
6508 struct hclge_dev *hdev = vport->back;
6510 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6513 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6515 struct hclge_vport *vport = hclge_get_vport(handle);
6516 struct hclge_dev *hdev = vport->back;
6518 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6521 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6523 struct hclge_vport *vport = hclge_get_vport(handle);
6524 struct hclge_dev *hdev = vport->back;
6526 return hdev->rst_stats.hw_reset_done_cnt;
6529 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6531 struct hclge_vport *vport = hclge_get_vport(handle);
6532 struct hclge_dev *hdev = vport->back;
6535 hdev->fd_en = enable;
6536 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6539 spin_lock_bh(&hdev->fd_rule_lock);
6540 hclge_del_all_fd_entries(handle, clear);
6541 spin_unlock_bh(&hdev->fd_rule_lock);
6543 hclge_restore_fd_entries(handle);
6547 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6549 struct hclge_desc desc;
6550 struct hclge_config_mac_mode_cmd *req =
6551 (struct hclge_config_mac_mode_cmd *)desc.data;
6555 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6558 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6559 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6560 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6561 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6562 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6563 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6564 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6565 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6566 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6567 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6570 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6572 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6574 dev_err(&hdev->pdev->dev,
6575 "mac enable fail, ret =%d.\n", ret);
6578 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6579 u8 switch_param, u8 param_mask)
6581 struct hclge_mac_vlan_switch_cmd *req;
6582 struct hclge_desc desc;
6586 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6587 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6589 /* read current config parameter */
6590 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6592 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6593 req->func_id = cpu_to_le32(func_id);
6595 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6597 dev_err(&hdev->pdev->dev,
6598 "read mac vlan switch parameter fail, ret = %d\n", ret);
6602 /* modify and write new config parameter */
6603 hclge_cmd_reuse_desc(&desc, false);
6604 req->switch_param = (req->switch_param & param_mask) | switch_param;
6605 req->param_mask = param_mask;
6607 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6609 dev_err(&hdev->pdev->dev,
6610 "set mac vlan switch parameter fail, ret = %d\n", ret);
6614 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6617 #define HCLGE_PHY_LINK_STATUS_NUM 200
6619 struct phy_device *phydev = hdev->hw.mac.phydev;
6624 ret = phy_read_status(phydev);
6626 dev_err(&hdev->pdev->dev,
6627 "phy update link status fail, ret = %d\n", ret);
6631 if (phydev->link == link_ret)
6634 msleep(HCLGE_LINK_STATUS_MS);
6635 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6638 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6640 #define HCLGE_MAC_LINK_STATUS_NUM 100
6647 ret = hclge_get_mac_link_status(hdev, &link_status);
6650 if (link_status == link_ret)
6653 msleep(HCLGE_LINK_STATUS_MS);
6654 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6658 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6663 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6666 hclge_phy_link_status_wait(hdev, link_ret);
6668 return hclge_mac_link_status_wait(hdev, link_ret);
6671 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6673 struct hclge_config_mac_mode_cmd *req;
6674 struct hclge_desc desc;
6678 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6679 /* 1 Read out the MAC mode config at first */
6680 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6681 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6683 dev_err(&hdev->pdev->dev,
6684 "mac loopback get fail, ret =%d.\n", ret);
6688 /* 2 Then setup the loopback flag */
6689 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6690 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6692 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6694 /* 3 Config mac work mode with loopback flag
6695 * and its original configure parameters
6697 hclge_cmd_reuse_desc(&desc, false);
6698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6700 dev_err(&hdev->pdev->dev,
6701 "mac loopback set fail, ret =%d.\n", ret);
6705 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6706 enum hnae3_loop loop_mode)
6708 #define HCLGE_SERDES_RETRY_MS 10
6709 #define HCLGE_SERDES_RETRY_NUM 100
6711 struct hclge_serdes_lb_cmd *req;
6712 struct hclge_desc desc;
6716 req = (struct hclge_serdes_lb_cmd *)desc.data;
6717 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6719 switch (loop_mode) {
6720 case HNAE3_LOOP_SERIAL_SERDES:
6721 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6723 case HNAE3_LOOP_PARALLEL_SERDES:
6724 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6727 dev_err(&hdev->pdev->dev,
6728 "unsupported serdes loopback mode %d\n", loop_mode);
6733 req->enable = loop_mode_b;
6734 req->mask = loop_mode_b;
6736 req->mask = loop_mode_b;
6739 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6741 dev_err(&hdev->pdev->dev,
6742 "serdes loopback set fail, ret = %d\n", ret);
6747 msleep(HCLGE_SERDES_RETRY_MS);
6748 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6752 dev_err(&hdev->pdev->dev,
6753 "serdes loopback get, ret = %d\n", ret);
6756 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6757 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6759 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6760 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6762 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6763 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6769 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6770 enum hnae3_loop loop_mode)
6774 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6778 hclge_cfg_mac_mode(hdev, en);
6780 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6782 dev_err(&hdev->pdev->dev,
6783 "serdes loopback config mac mode timeout\n");
6788 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6789 struct phy_device *phydev)
6793 if (!phydev->suspended) {
6794 ret = phy_suspend(phydev);
6799 ret = phy_resume(phydev);
6803 return phy_loopback(phydev, true);
6806 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6807 struct phy_device *phydev)
6811 ret = phy_loopback(phydev, false);
6815 return phy_suspend(phydev);
6818 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6820 struct phy_device *phydev = hdev->hw.mac.phydev;
6827 ret = hclge_enable_phy_loopback(hdev, phydev);
6829 ret = hclge_disable_phy_loopback(hdev, phydev);
6831 dev_err(&hdev->pdev->dev,
6832 "set phy loopback fail, ret = %d\n", ret);
6836 hclge_cfg_mac_mode(hdev, en);
6838 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6840 dev_err(&hdev->pdev->dev,
6841 "phy loopback config mac mode timeout\n");
6846 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6847 int stream_id, bool enable)
6849 struct hclge_desc desc;
6850 struct hclge_cfg_com_tqp_queue_cmd *req =
6851 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6855 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6856 req->stream_id = cpu_to_le16(stream_id);
6858 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6860 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6862 dev_err(&hdev->pdev->dev,
6863 "Tqp enable fail, status =%d.\n", ret);
6867 static int hclge_set_loopback(struct hnae3_handle *handle,
6868 enum hnae3_loop loop_mode, bool en)
6870 struct hclge_vport *vport = hclge_get_vport(handle);
6871 struct hnae3_knic_private_info *kinfo;
6872 struct hclge_dev *hdev = vport->back;
6875 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6876 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6877 * the same, the packets are looped back in the SSU. If SSU loopback
6878 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6880 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6881 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6883 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6884 HCLGE_SWITCH_ALW_LPBK_MASK);
6889 switch (loop_mode) {
6890 case HNAE3_LOOP_APP:
6891 ret = hclge_set_app_loopback(hdev, en);
6893 case HNAE3_LOOP_SERIAL_SERDES:
6894 case HNAE3_LOOP_PARALLEL_SERDES:
6895 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6897 case HNAE3_LOOP_PHY:
6898 ret = hclge_set_phy_loopback(hdev, en);
6902 dev_err(&hdev->pdev->dev,
6903 "loop_mode %d is not supported\n", loop_mode);
6910 kinfo = &vport->nic.kinfo;
6911 for (i = 0; i < kinfo->num_tqps; i++) {
6912 ret = hclge_tqp_enable(hdev, i, 0, en);
6920 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6924 ret = hclge_set_app_loopback(hdev, false);
6928 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6932 return hclge_cfg_serdes_loopback(hdev, false,
6933 HNAE3_LOOP_PARALLEL_SERDES);
6936 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6938 struct hclge_vport *vport = hclge_get_vport(handle);
6939 struct hnae3_knic_private_info *kinfo;
6940 struct hnae3_queue *queue;
6941 struct hclge_tqp *tqp;
6944 kinfo = &vport->nic.kinfo;
6945 for (i = 0; i < kinfo->num_tqps; i++) {
6946 queue = handle->kinfo.tqp[i];
6947 tqp = container_of(queue, struct hclge_tqp, q);
6948 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6952 static void hclge_flush_link_update(struct hclge_dev *hdev)
6954 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6956 unsigned long last = hdev->serv_processed_cnt;
6959 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6960 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6961 last == hdev->serv_processed_cnt)
6965 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6967 struct hclge_vport *vport = hclge_get_vport(handle);
6968 struct hclge_dev *hdev = vport->back;
6971 hclge_task_schedule(hdev, 0);
6973 /* Set the DOWN flag here to disable link updating */
6974 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6976 /* flush memory to make sure DOWN is seen by service task */
6977 smp_mb__before_atomic();
6978 hclge_flush_link_update(hdev);
6982 static int hclge_ae_start(struct hnae3_handle *handle)
6984 struct hclge_vport *vport = hclge_get_vport(handle);
6985 struct hclge_dev *hdev = vport->back;
6988 hclge_cfg_mac_mode(hdev, true);
6989 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6990 hdev->hw.mac.link = 0;
6992 /* reset tqp stats */
6993 hclge_reset_tqp_stats(handle);
6995 hclge_mac_start_phy(hdev);
7000 static void hclge_ae_stop(struct hnae3_handle *handle)
7002 struct hclge_vport *vport = hclge_get_vport(handle);
7003 struct hclge_dev *hdev = vport->back;
7006 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7007 spin_lock_bh(&hdev->fd_rule_lock);
7008 hclge_clear_arfs_rules(handle);
7009 spin_unlock_bh(&hdev->fd_rule_lock);
7011 /* If it is not PF reset, the firmware will disable the MAC,
7012 * so it only need to stop phy here.
7014 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7015 hdev->reset_type != HNAE3_FUNC_RESET) {
7016 hclge_mac_stop_phy(hdev);
7017 hclge_update_link_status(hdev);
7021 for (i = 0; i < handle->kinfo.num_tqps; i++)
7022 hclge_reset_tqp(handle, i);
7024 hclge_config_mac_tnl_int(hdev, false);
7027 hclge_cfg_mac_mode(hdev, false);
7029 hclge_mac_stop_phy(hdev);
7031 /* reset tqp stats */
7032 hclge_reset_tqp_stats(handle);
7033 hclge_update_link_status(hdev);
7036 int hclge_vport_start(struct hclge_vport *vport)
7038 struct hclge_dev *hdev = vport->back;
7040 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7041 vport->last_active_jiffies = jiffies;
7043 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7044 if (vport->vport_id) {
7045 hclge_restore_mac_table_common(vport);
7046 hclge_restore_vport_vlan_table(vport);
7048 hclge_restore_hw_table(hdev);
7052 clear_bit(vport->vport_id, hdev->vport_config_block);
7057 void hclge_vport_stop(struct hclge_vport *vport)
7059 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7062 static int hclge_client_start(struct hnae3_handle *handle)
7064 struct hclge_vport *vport = hclge_get_vport(handle);
7066 return hclge_vport_start(vport);
7069 static void hclge_client_stop(struct hnae3_handle *handle)
7071 struct hclge_vport *vport = hclge_get_vport(handle);
7073 hclge_vport_stop(vport);
7076 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7077 u16 cmdq_resp, u8 resp_code,
7078 enum hclge_mac_vlan_tbl_opcode op)
7080 struct hclge_dev *hdev = vport->back;
7083 dev_err(&hdev->pdev->dev,
7084 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7089 if (op == HCLGE_MAC_VLAN_ADD) {
7090 if (!resp_code || resp_code == 1)
7092 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7093 resp_code == HCLGE_ADD_MC_OVERFLOW)
7096 dev_err(&hdev->pdev->dev,
7097 "add mac addr failed for undefined, code=%u.\n",
7100 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7103 } else if (resp_code == 1) {
7104 dev_dbg(&hdev->pdev->dev,
7105 "remove mac addr failed for miss.\n");
7109 dev_err(&hdev->pdev->dev,
7110 "remove mac addr failed for undefined, code=%u.\n",
7113 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7116 } else if (resp_code == 1) {
7117 dev_dbg(&hdev->pdev->dev,
7118 "lookup mac addr failed for miss.\n");
7122 dev_err(&hdev->pdev->dev,
7123 "lookup mac addr failed for undefined, code=%u.\n",
7128 dev_err(&hdev->pdev->dev,
7129 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7134 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7136 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7138 unsigned int word_num;
7139 unsigned int bit_num;
7141 if (vfid > 255 || vfid < 0)
7144 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7145 word_num = vfid / 32;
7146 bit_num = vfid % 32;
7148 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7150 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7152 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7153 bit_num = vfid % 32;
7155 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7157 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7163 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7165 #define HCLGE_DESC_NUMBER 3
7166 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7169 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7170 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7171 if (desc[i].data[j])
7177 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7178 const u8 *addr, bool is_mc)
7180 const unsigned char *mac_addr = addr;
7181 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7182 (mac_addr[0]) | (mac_addr[1] << 8);
7183 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7185 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7187 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7188 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7191 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7192 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7195 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7196 struct hclge_mac_vlan_tbl_entry_cmd *req)
7198 struct hclge_dev *hdev = vport->back;
7199 struct hclge_desc desc;
7204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7206 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7208 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7210 dev_err(&hdev->pdev->dev,
7211 "del mac addr failed for cmd_send, ret =%d.\n",
7215 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7216 retval = le16_to_cpu(desc.retval);
7218 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7219 HCLGE_MAC_VLAN_REMOVE);
7222 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7223 struct hclge_mac_vlan_tbl_entry_cmd *req,
7224 struct hclge_desc *desc,
7227 struct hclge_dev *hdev = vport->back;
7232 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7234 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7235 memcpy(desc[0].data,
7237 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7238 hclge_cmd_setup_basic_desc(&desc[1],
7239 HCLGE_OPC_MAC_VLAN_ADD,
7241 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7242 hclge_cmd_setup_basic_desc(&desc[2],
7243 HCLGE_OPC_MAC_VLAN_ADD,
7245 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7247 memcpy(desc[0].data,
7249 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7250 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7253 dev_err(&hdev->pdev->dev,
7254 "lookup mac addr failed for cmd_send, ret =%d.\n",
7258 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7259 retval = le16_to_cpu(desc[0].retval);
7261 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7262 HCLGE_MAC_VLAN_LKUP);
7265 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7266 struct hclge_mac_vlan_tbl_entry_cmd *req,
7267 struct hclge_desc *mc_desc)
7269 struct hclge_dev *hdev = vport->back;
7276 struct hclge_desc desc;
7278 hclge_cmd_setup_basic_desc(&desc,
7279 HCLGE_OPC_MAC_VLAN_ADD,
7281 memcpy(desc.data, req,
7282 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7283 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7284 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7285 retval = le16_to_cpu(desc.retval);
7287 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7289 HCLGE_MAC_VLAN_ADD);
7291 hclge_cmd_reuse_desc(&mc_desc[0], false);
7292 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7293 hclge_cmd_reuse_desc(&mc_desc[1], false);
7294 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7295 hclge_cmd_reuse_desc(&mc_desc[2], false);
7296 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7297 memcpy(mc_desc[0].data, req,
7298 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7299 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7300 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7301 retval = le16_to_cpu(mc_desc[0].retval);
7303 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7305 HCLGE_MAC_VLAN_ADD);
7309 dev_err(&hdev->pdev->dev,
7310 "add mac addr failed for cmd_send, ret =%d.\n",
7318 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7319 u16 *allocated_size)
7321 struct hclge_umv_spc_alc_cmd *req;
7322 struct hclge_desc desc;
7325 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7328 req->space_size = cpu_to_le32(space_size);
7330 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7332 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7337 *allocated_size = le32_to_cpu(desc.data[1]);
7342 static int hclge_init_umv_space(struct hclge_dev *hdev)
7344 u16 allocated_size = 0;
7347 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7351 if (allocated_size < hdev->wanted_umv_size)
7352 dev_warn(&hdev->pdev->dev,
7353 "failed to alloc umv space, want %u, get %u\n",
7354 hdev->wanted_umv_size, allocated_size);
7356 hdev->max_umv_size = allocated_size;
7357 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7358 hdev->share_umv_size = hdev->priv_umv_size +
7359 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7364 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7366 struct hclge_vport *vport;
7369 for (i = 0; i < hdev->num_alloc_vport; i++) {
7370 vport = &hdev->vport[i];
7371 vport->used_umv_num = 0;
7374 mutex_lock(&hdev->vport_lock);
7375 hdev->share_umv_size = hdev->priv_umv_size +
7376 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7377 mutex_unlock(&hdev->vport_lock);
7380 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7382 struct hclge_dev *hdev = vport->back;
7386 mutex_lock(&hdev->vport_lock);
7388 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7389 hdev->share_umv_size == 0);
7392 mutex_unlock(&hdev->vport_lock);
7397 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7399 struct hclge_dev *hdev = vport->back;
7402 if (vport->used_umv_num > hdev->priv_umv_size)
7403 hdev->share_umv_size++;
7405 if (vport->used_umv_num > 0)
7406 vport->used_umv_num--;
7408 if (vport->used_umv_num >= hdev->priv_umv_size &&
7409 hdev->share_umv_size > 0)
7410 hdev->share_umv_size--;
7411 vport->used_umv_num++;
7415 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7418 struct hclge_mac_node *mac_node, *tmp;
7420 list_for_each_entry_safe(mac_node, tmp, list, node)
7421 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7427 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7428 enum HCLGE_MAC_NODE_STATE state)
7431 /* from set_rx_mode or tmp_add_list */
7432 case HCLGE_MAC_TO_ADD:
7433 if (mac_node->state == HCLGE_MAC_TO_DEL)
7434 mac_node->state = HCLGE_MAC_ACTIVE;
7436 /* only from set_rx_mode */
7437 case HCLGE_MAC_TO_DEL:
7438 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7439 list_del(&mac_node->node);
7442 mac_node->state = HCLGE_MAC_TO_DEL;
7445 /* only from tmp_add_list, the mac_node->state won't be
7448 case HCLGE_MAC_ACTIVE:
7449 if (mac_node->state == HCLGE_MAC_TO_ADD)
7450 mac_node->state = HCLGE_MAC_ACTIVE;
7456 int hclge_update_mac_list(struct hclge_vport *vport,
7457 enum HCLGE_MAC_NODE_STATE state,
7458 enum HCLGE_MAC_ADDR_TYPE mac_type,
7459 const unsigned char *addr)
7461 struct hclge_dev *hdev = vport->back;
7462 struct hclge_mac_node *mac_node;
7463 struct list_head *list;
7465 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7466 &vport->uc_mac_list : &vport->mc_mac_list;
7468 spin_lock_bh(&vport->mac_list_lock);
7470 /* if the mac addr is already in the mac list, no need to add a new
7471 * one into it, just check the mac addr state, convert it to a new
7472 * new state, or just remove it, or do nothing.
7474 mac_node = hclge_find_mac_node(list, addr);
7476 hclge_update_mac_node(mac_node, state);
7477 spin_unlock_bh(&vport->mac_list_lock);
7478 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7482 /* if this address is never added, unnecessary to delete */
7483 if (state == HCLGE_MAC_TO_DEL) {
7484 spin_unlock_bh(&vport->mac_list_lock);
7485 dev_err(&hdev->pdev->dev,
7486 "failed to delete address %pM from mac list\n",
7491 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7493 spin_unlock_bh(&vport->mac_list_lock);
7497 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7499 mac_node->state = state;
7500 ether_addr_copy(mac_node->mac_addr, addr);
7501 list_add_tail(&mac_node->node, list);
7503 spin_unlock_bh(&vport->mac_list_lock);
7508 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7509 const unsigned char *addr)
7511 struct hclge_vport *vport = hclge_get_vport(handle);
7513 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7517 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7518 const unsigned char *addr)
7520 struct hclge_dev *hdev = vport->back;
7521 struct hclge_mac_vlan_tbl_entry_cmd req;
7522 struct hclge_desc desc;
7523 u16 egress_port = 0;
7526 /* mac addr check */
7527 if (is_zero_ether_addr(addr) ||
7528 is_broadcast_ether_addr(addr) ||
7529 is_multicast_ether_addr(addr)) {
7530 dev_err(&hdev->pdev->dev,
7531 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7532 addr, is_zero_ether_addr(addr),
7533 is_broadcast_ether_addr(addr),
7534 is_multicast_ether_addr(addr));
7538 memset(&req, 0, sizeof(req));
7540 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7541 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7543 req.egress_port = cpu_to_le16(egress_port);
7545 hclge_prepare_mac_addr(&req, addr, false);
7547 /* Lookup the mac address in the mac_vlan table, and add
7548 * it if the entry is inexistent. Repeated unicast entry
7549 * is not allowed in the mac vlan table.
7551 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7552 if (ret == -ENOENT) {
7553 mutex_lock(&hdev->vport_lock);
7554 if (!hclge_is_umv_space_full(vport, false)) {
7555 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7557 hclge_update_umv_space(vport, false);
7558 mutex_unlock(&hdev->vport_lock);
7561 mutex_unlock(&hdev->vport_lock);
7563 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7564 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7565 hdev->priv_umv_size);
7570 /* check if we just hit the duplicate */
7572 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7573 vport->vport_id, addr);
7577 dev_err(&hdev->pdev->dev,
7578 "PF failed to add unicast entry(%pM) in the MAC table\n",
7584 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7585 const unsigned char *addr)
7587 struct hclge_vport *vport = hclge_get_vport(handle);
7589 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7593 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7594 const unsigned char *addr)
7596 struct hclge_dev *hdev = vport->back;
7597 struct hclge_mac_vlan_tbl_entry_cmd req;
7600 /* mac addr check */
7601 if (is_zero_ether_addr(addr) ||
7602 is_broadcast_ether_addr(addr) ||
7603 is_multicast_ether_addr(addr)) {
7604 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7609 memset(&req, 0, sizeof(req));
7610 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7611 hclge_prepare_mac_addr(&req, addr, false);
7612 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7614 mutex_lock(&hdev->vport_lock);
7615 hclge_update_umv_space(vport, true);
7616 mutex_unlock(&hdev->vport_lock);
7617 } else if (ret == -ENOENT) {
7624 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7625 const unsigned char *addr)
7627 struct hclge_vport *vport = hclge_get_vport(handle);
7629 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7633 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7634 const unsigned char *addr)
7636 struct hclge_dev *hdev = vport->back;
7637 struct hclge_mac_vlan_tbl_entry_cmd req;
7638 struct hclge_desc desc[3];
7641 /* mac addr check */
7642 if (!is_multicast_ether_addr(addr)) {
7643 dev_err(&hdev->pdev->dev,
7644 "Add mc mac err! invalid mac:%pM.\n",
7648 memset(&req, 0, sizeof(req));
7649 hclge_prepare_mac_addr(&req, addr, true);
7650 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7652 /* This mac addr do not exist, add new entry for it */
7653 memset(desc[0].data, 0, sizeof(desc[0].data));
7654 memset(desc[1].data, 0, sizeof(desc[0].data));
7655 memset(desc[2].data, 0, sizeof(desc[0].data));
7657 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7660 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7662 /* if already overflow, not to print each time */
7663 if (status == -ENOSPC &&
7664 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7665 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7670 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7671 const unsigned char *addr)
7673 struct hclge_vport *vport = hclge_get_vport(handle);
7675 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7679 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7680 const unsigned char *addr)
7682 struct hclge_dev *hdev = vport->back;
7683 struct hclge_mac_vlan_tbl_entry_cmd req;
7684 enum hclge_cmd_status status;
7685 struct hclge_desc desc[3];
7687 /* mac addr check */
7688 if (!is_multicast_ether_addr(addr)) {
7689 dev_dbg(&hdev->pdev->dev,
7690 "Remove mc mac err! invalid mac:%pM.\n",
7695 memset(&req, 0, sizeof(req));
7696 hclge_prepare_mac_addr(&req, addr, true);
7697 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7699 /* This mac addr exist, remove this handle's VFID for it */
7700 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7704 if (hclge_is_all_function_id_zero(desc))
7705 /* All the vfid is zero, so need to delete this entry */
7706 status = hclge_remove_mac_vlan_tbl(vport, &req);
7708 /* Not all the vfid is zero, update the vfid */
7709 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7711 } else if (status == -ENOENT) {
7718 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7719 struct list_head *list,
7720 int (*sync)(struct hclge_vport *,
7721 const unsigned char *))
7723 struct hclge_mac_node *mac_node, *tmp;
7726 list_for_each_entry_safe(mac_node, tmp, list, node) {
7727 ret = sync(vport, mac_node->mac_addr);
7729 mac_node->state = HCLGE_MAC_ACTIVE;
7731 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7738 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7739 struct list_head *list,
7740 int (*unsync)(struct hclge_vport *,
7741 const unsigned char *))
7743 struct hclge_mac_node *mac_node, *tmp;
7746 list_for_each_entry_safe(mac_node, tmp, list, node) {
7747 ret = unsync(vport, mac_node->mac_addr);
7748 if (!ret || ret == -ENOENT) {
7749 list_del(&mac_node->node);
7752 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7759 static bool hclge_sync_from_add_list(struct list_head *add_list,
7760 struct list_head *mac_list)
7762 struct hclge_mac_node *mac_node, *tmp, *new_node;
7763 bool all_added = true;
7765 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7766 if (mac_node->state == HCLGE_MAC_TO_ADD)
7769 /* if the mac address from tmp_add_list is not in the
7770 * uc/mc_mac_list, it means have received a TO_DEL request
7771 * during the time window of adding the mac address into mac
7772 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
7773 * then it will be removed at next time. else it must be TO_ADD,
7774 * this address hasn't been added into mac table,
7775 * so just remove the mac node.
7777 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7779 hclge_update_mac_node(new_node, mac_node->state);
7780 list_del(&mac_node->node);
7782 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7783 mac_node->state = HCLGE_MAC_TO_DEL;
7784 list_del(&mac_node->node);
7785 list_add_tail(&mac_node->node, mac_list);
7787 list_del(&mac_node->node);
7795 static void hclge_sync_from_del_list(struct list_head *del_list,
7796 struct list_head *mac_list)
7798 struct hclge_mac_node *mac_node, *tmp, *new_node;
7800 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7801 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7803 /* If the mac addr exists in the mac list, it means
7804 * received a new TO_ADD request during the time window
7805 * of configuring the mac address. For the mac node
7806 * state is TO_ADD, and the address is already in the
7807 * in the hardware(due to delete fail), so we just need
7808 * to change the mac node state to ACTIVE.
7810 new_node->state = HCLGE_MAC_ACTIVE;
7811 list_del(&mac_node->node);
7814 list_del(&mac_node->node);
7815 list_add_tail(&mac_node->node, mac_list);
7820 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7821 enum HCLGE_MAC_ADDR_TYPE mac_type,
7824 if (mac_type == HCLGE_MAC_ADDR_UC) {
7826 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7828 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7831 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7833 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7837 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7838 enum HCLGE_MAC_ADDR_TYPE mac_type)
7840 struct hclge_mac_node *mac_node, *tmp, *new_node;
7841 struct list_head tmp_add_list, tmp_del_list;
7842 struct list_head *list;
7845 INIT_LIST_HEAD(&tmp_add_list);
7846 INIT_LIST_HEAD(&tmp_del_list);
7848 /* move the mac addr to the tmp_add_list and tmp_del_list, then
7849 * we can add/delete these mac addr outside the spin lock
7851 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7852 &vport->uc_mac_list : &vport->mc_mac_list;
7854 spin_lock_bh(&vport->mac_list_lock);
7856 list_for_each_entry_safe(mac_node, tmp, list, node) {
7857 switch (mac_node->state) {
7858 case HCLGE_MAC_TO_DEL:
7859 list_del(&mac_node->node);
7860 list_add_tail(&mac_node->node, &tmp_del_list);
7862 case HCLGE_MAC_TO_ADD:
7863 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7866 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7867 new_node->state = mac_node->state;
7868 list_add_tail(&new_node->node, &tmp_add_list);
7876 spin_unlock_bh(&vport->mac_list_lock);
7878 /* delete first, in order to get max mac table space for adding */
7879 if (mac_type == HCLGE_MAC_ADDR_UC) {
7880 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7881 hclge_rm_uc_addr_common);
7882 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7883 hclge_add_uc_addr_common);
7885 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7886 hclge_rm_mc_addr_common);
7887 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7888 hclge_add_mc_addr_common);
7891 /* if some mac addresses were added/deleted fail, move back to the
7892 * mac_list, and retry at next time.
7894 spin_lock_bh(&vport->mac_list_lock);
7896 hclge_sync_from_del_list(&tmp_del_list, list);
7897 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7899 spin_unlock_bh(&vport->mac_list_lock);
7901 hclge_update_overflow_flags(vport, mac_type, all_added);
7904 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7906 struct hclge_dev *hdev = vport->back;
7908 if (test_bit(vport->vport_id, hdev->vport_config_block))
7911 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7917 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7921 for (i = 0; i < hdev->num_alloc_vport; i++) {
7922 struct hclge_vport *vport = &hdev->vport[i];
7924 if (!hclge_need_sync_mac_table(vport))
7927 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7928 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7932 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7933 enum HCLGE_MAC_ADDR_TYPE mac_type)
7935 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7936 struct hclge_mac_node *mac_cfg, *tmp;
7937 struct hclge_dev *hdev = vport->back;
7938 struct list_head tmp_del_list, *list;
7941 if (mac_type == HCLGE_MAC_ADDR_UC) {
7942 list = &vport->uc_mac_list;
7943 unsync = hclge_rm_uc_addr_common;
7945 list = &vport->mc_mac_list;
7946 unsync = hclge_rm_mc_addr_common;
7949 INIT_LIST_HEAD(&tmp_del_list);
7952 set_bit(vport->vport_id, hdev->vport_config_block);
7954 spin_lock_bh(&vport->mac_list_lock);
7956 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7957 switch (mac_cfg->state) {
7958 case HCLGE_MAC_TO_DEL:
7959 case HCLGE_MAC_ACTIVE:
7960 list_del(&mac_cfg->node);
7961 list_add_tail(&mac_cfg->node, &tmp_del_list);
7963 case HCLGE_MAC_TO_ADD:
7965 list_del(&mac_cfg->node);
7972 spin_unlock_bh(&vport->mac_list_lock);
7974 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7975 ret = unsync(vport, mac_cfg->mac_addr);
7976 if (!ret || ret == -ENOENT) {
7977 /* clear all mac addr from hardware, but remain these
7978 * mac addr in the mac list, and restore them after
7979 * vf reset finished.
7982 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7983 mac_cfg->state = HCLGE_MAC_TO_ADD;
7985 list_del(&mac_cfg->node);
7988 } else if (is_del_list) {
7989 mac_cfg->state = HCLGE_MAC_TO_DEL;
7993 spin_lock_bh(&vport->mac_list_lock);
7995 hclge_sync_from_del_list(&tmp_del_list, list);
7997 spin_unlock_bh(&vport->mac_list_lock);
8000 /* remove all mac address when uninitailize */
8001 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8002 enum HCLGE_MAC_ADDR_TYPE mac_type)
8004 struct hclge_mac_node *mac_node, *tmp;
8005 struct hclge_dev *hdev = vport->back;
8006 struct list_head tmp_del_list, *list;
8008 INIT_LIST_HEAD(&tmp_del_list);
8010 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8011 &vport->uc_mac_list : &vport->mc_mac_list;
8013 spin_lock_bh(&vport->mac_list_lock);
8015 list_for_each_entry_safe(mac_node, tmp, list, node) {
8016 switch (mac_node->state) {
8017 case HCLGE_MAC_TO_DEL:
8018 case HCLGE_MAC_ACTIVE:
8019 list_del(&mac_node->node);
8020 list_add_tail(&mac_node->node, &tmp_del_list);
8022 case HCLGE_MAC_TO_ADD:
8023 list_del(&mac_node->node);
8029 spin_unlock_bh(&vport->mac_list_lock);
8031 if (mac_type == HCLGE_MAC_ADDR_UC)
8032 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8033 hclge_rm_uc_addr_common);
8035 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8036 hclge_rm_mc_addr_common);
8038 if (!list_empty(&tmp_del_list))
8039 dev_warn(&hdev->pdev->dev,
8040 "uninit %s mac list for vport %u not completely.\n",
8041 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8044 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8045 list_del(&mac_node->node);
8050 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8052 struct hclge_vport *vport;
8055 for (i = 0; i < hdev->num_alloc_vport; i++) {
8056 vport = &hdev->vport[i];
8057 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8058 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8062 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8063 u16 cmdq_resp, u8 resp_code)
8065 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8066 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
8067 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8068 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8073 dev_err(&hdev->pdev->dev,
8074 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8079 switch (resp_code) {
8080 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8081 case HCLGE_ETHERTYPE_ALREADY_ADD:
8084 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8085 dev_err(&hdev->pdev->dev,
8086 "add mac ethertype failed for manager table overflow.\n");
8087 return_status = -EIO;
8089 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8090 dev_err(&hdev->pdev->dev,
8091 "add mac ethertype failed for key conflict.\n");
8092 return_status = -EIO;
8095 dev_err(&hdev->pdev->dev,
8096 "add mac ethertype failed for undefined, code=%u.\n",
8098 return_status = -EIO;
8101 return return_status;
8104 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8107 struct hclge_mac_vlan_tbl_entry_cmd req;
8108 struct hclge_dev *hdev = vport->back;
8109 struct hclge_desc desc;
8110 u16 egress_port = 0;
8113 if (is_zero_ether_addr(mac_addr))
8116 memset(&req, 0, sizeof(req));
8117 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8118 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8119 req.egress_port = cpu_to_le16(egress_port);
8120 hclge_prepare_mac_addr(&req, mac_addr, false);
8122 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8125 vf_idx += HCLGE_VF_VPORT_START_NUM;
8126 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8128 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8134 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8137 struct hclge_vport *vport = hclge_get_vport(handle);
8138 struct hclge_dev *hdev = vport->back;
8140 vport = hclge_get_vf_vport(hdev, vf);
8144 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8145 dev_info(&hdev->pdev->dev,
8146 "Specified MAC(=%pM) is same as before, no change committed!\n",
8151 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8152 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8157 ether_addr_copy(vport->vf_info.mac, mac_addr);
8159 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8160 dev_info(&hdev->pdev->dev,
8161 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8163 return hclge_inform_reset_assert_to_vf(vport);
8166 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8171 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8172 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8174 struct hclge_desc desc;
8179 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8180 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8182 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8184 dev_err(&hdev->pdev->dev,
8185 "add mac ethertype failed for cmd_send, ret =%d.\n",
8190 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8191 retval = le16_to_cpu(desc.retval);
8193 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8196 static int init_mgr_tbl(struct hclge_dev *hdev)
8201 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8202 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8204 dev_err(&hdev->pdev->dev,
8205 "add mac ethertype failed, ret =%d.\n",
8214 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8216 struct hclge_vport *vport = hclge_get_vport(handle);
8217 struct hclge_dev *hdev = vport->back;
8219 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8222 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8223 const u8 *old_addr, const u8 *new_addr)
8225 struct list_head *list = &vport->uc_mac_list;
8226 struct hclge_mac_node *old_node, *new_node;
8228 new_node = hclge_find_mac_node(list, new_addr);
8230 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8234 new_node->state = HCLGE_MAC_TO_ADD;
8235 ether_addr_copy(new_node->mac_addr, new_addr);
8236 list_add(&new_node->node, list);
8238 if (new_node->state == HCLGE_MAC_TO_DEL)
8239 new_node->state = HCLGE_MAC_ACTIVE;
8241 /* make sure the new addr is in the list head, avoid dev
8242 * addr may be not re-added into mac table for the umv space
8243 * limitation after global/imp reset which will clear mac
8244 * table by hardware.
8246 list_move(&new_node->node, list);
8249 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8250 old_node = hclge_find_mac_node(list, old_addr);
8252 if (old_node->state == HCLGE_MAC_TO_ADD) {
8253 list_del(&old_node->node);
8256 old_node->state = HCLGE_MAC_TO_DEL;
8261 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8266 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8269 const unsigned char *new_addr = (const unsigned char *)p;
8270 struct hclge_vport *vport = hclge_get_vport(handle);
8271 struct hclge_dev *hdev = vport->back;
8272 unsigned char *old_addr = NULL;
8275 /* mac addr check */
8276 if (is_zero_ether_addr(new_addr) ||
8277 is_broadcast_ether_addr(new_addr) ||
8278 is_multicast_ether_addr(new_addr)) {
8279 dev_err(&hdev->pdev->dev,
8280 "change uc mac err! invalid mac: %pM.\n",
8285 ret = hclge_pause_addr_cfg(hdev, new_addr);
8287 dev_err(&hdev->pdev->dev,
8288 "failed to configure mac pause address, ret = %d\n",
8294 old_addr = hdev->hw.mac.mac_addr;
8296 spin_lock_bh(&vport->mac_list_lock);
8297 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8299 dev_err(&hdev->pdev->dev,
8300 "failed to change the mac addr:%pM, ret = %d\n",
8302 spin_unlock_bh(&vport->mac_list_lock);
8305 hclge_pause_addr_cfg(hdev, old_addr);
8309 /* we must update dev addr with spin lock protect, preventing dev addr
8310 * being removed by set_rx_mode path.
8312 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8313 spin_unlock_bh(&vport->mac_list_lock);
8315 hclge_task_schedule(hdev, 0);
8320 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8323 struct hclge_vport *vport = hclge_get_vport(handle);
8324 struct hclge_dev *hdev = vport->back;
8326 if (!hdev->hw.mac.phydev)
8329 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8332 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8333 u8 fe_type, bool filter_en, u8 vf_id)
8335 struct hclge_vlan_filter_ctrl_cmd *req;
8336 struct hclge_desc desc;
8339 /* read current vlan filter parameter */
8340 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8341 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8342 req->vlan_type = vlan_type;
8345 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8347 dev_err(&hdev->pdev->dev,
8348 "failed to get vlan filter config, ret = %d.\n", ret);
8352 /* modify and write new config parameter */
8353 hclge_cmd_reuse_desc(&desc, false);
8354 req->vlan_fe = filter_en ?
8355 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8359 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8365 #define HCLGE_FILTER_TYPE_VF 0
8366 #define HCLGE_FILTER_TYPE_PORT 1
8367 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8368 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8369 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8370 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8371 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8372 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8373 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8374 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8375 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8377 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8379 struct hclge_vport *vport = hclge_get_vport(handle);
8380 struct hclge_dev *hdev = vport->back;
8382 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8383 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8384 HCLGE_FILTER_FE_EGRESS, enable, 0);
8385 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8386 HCLGE_FILTER_FE_INGRESS, enable, 0);
8388 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8389 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8393 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8395 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8398 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8399 bool is_kill, u16 vlan,
8402 struct hclge_vport *vport = &hdev->vport[vfid];
8403 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8404 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8405 struct hclge_desc desc[2];
8410 /* if vf vlan table is full, firmware will close vf vlan filter, it
8411 * is unable and unnecessary to add new vlan id to vf vlan filter.
8412 * If spoof check is enable, and vf vlan is full, it shouldn't add
8413 * new vlan, because tx packets with these vlan id will be dropped.
8415 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8416 if (vport->vf_info.spoofchk && vlan) {
8417 dev_err(&hdev->pdev->dev,
8418 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8424 hclge_cmd_setup_basic_desc(&desc[0],
8425 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8426 hclge_cmd_setup_basic_desc(&desc[1],
8427 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8429 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8431 vf_byte_off = vfid / 8;
8432 vf_byte_val = 1 << (vfid % 8);
8434 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8435 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8437 req0->vlan_id = cpu_to_le16(vlan);
8438 req0->vlan_cfg = is_kill;
8440 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8441 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8443 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8445 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8447 dev_err(&hdev->pdev->dev,
8448 "Send vf vlan command fail, ret =%d.\n",
8454 #define HCLGE_VF_VLAN_NO_ENTRY 2
8455 if (!req0->resp_code || req0->resp_code == 1)
8458 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8459 set_bit(vfid, hdev->vf_vlan_full);
8460 dev_warn(&hdev->pdev->dev,
8461 "vf vlan table is full, vf vlan filter is disabled\n");
8465 dev_err(&hdev->pdev->dev,
8466 "Add vf vlan filter fail, ret =%u.\n",
8469 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8470 if (!req0->resp_code)
8473 /* vf vlan filter is disabled when vf vlan table is full,
8474 * then new vlan id will not be added into vf vlan table.
8475 * Just return 0 without warning, avoid massive verbose
8476 * print logs when unload.
8478 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8481 dev_err(&hdev->pdev->dev,
8482 "Kill vf vlan filter fail, ret =%u.\n",
8489 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8490 u16 vlan_id, bool is_kill)
8492 struct hclge_vlan_filter_pf_cfg_cmd *req;
8493 struct hclge_desc desc;
8494 u8 vlan_offset_byte_val;
8495 u8 vlan_offset_byte;
8499 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8501 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8502 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8503 HCLGE_VLAN_BYTE_SIZE;
8504 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8506 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8507 req->vlan_offset = vlan_offset_160;
8508 req->vlan_cfg = is_kill;
8509 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8511 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8513 dev_err(&hdev->pdev->dev,
8514 "port vlan command, send fail, ret =%d.\n", ret);
8518 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8519 u16 vport_id, u16 vlan_id,
8522 u16 vport_idx, vport_num = 0;
8525 if (is_kill && !vlan_id)
8528 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8531 dev_err(&hdev->pdev->dev,
8532 "Set %u vport vlan filter config fail, ret =%d.\n",
8537 /* vlan 0 may be added twice when 8021q module is enabled */
8538 if (!is_kill && !vlan_id &&
8539 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8542 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8543 dev_err(&hdev->pdev->dev,
8544 "Add port vlan failed, vport %u is already in vlan %u\n",
8550 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8551 dev_err(&hdev->pdev->dev,
8552 "Delete port vlan failed, vport %u is not in vlan %u\n",
8557 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8560 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8561 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8567 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8569 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8570 struct hclge_vport_vtag_tx_cfg_cmd *req;
8571 struct hclge_dev *hdev = vport->back;
8572 struct hclge_desc desc;
8576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8578 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8579 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8580 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8581 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8582 vcfg->accept_tag1 ? 1 : 0);
8583 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8584 vcfg->accept_untag1 ? 1 : 0);
8585 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8586 vcfg->accept_tag2 ? 1 : 0);
8587 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8588 vcfg->accept_untag2 ? 1 : 0);
8589 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8590 vcfg->insert_tag1_en ? 1 : 0);
8591 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8592 vcfg->insert_tag2_en ? 1 : 0);
8593 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8595 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8596 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8597 HCLGE_VF_NUM_PER_BYTE;
8598 req->vf_bitmap[bmap_index] =
8599 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8601 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8603 dev_err(&hdev->pdev->dev,
8604 "Send port txvlan cfg command fail, ret =%d\n",
8610 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8612 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8613 struct hclge_vport_vtag_rx_cfg_cmd *req;
8614 struct hclge_dev *hdev = vport->back;
8615 struct hclge_desc desc;
8619 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8621 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8622 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8623 vcfg->strip_tag1_en ? 1 : 0);
8624 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8625 vcfg->strip_tag2_en ? 1 : 0);
8626 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8627 vcfg->vlan1_vlan_prionly ? 1 : 0);
8628 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8629 vcfg->vlan2_vlan_prionly ? 1 : 0);
8631 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8632 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8633 HCLGE_VF_NUM_PER_BYTE;
8634 req->vf_bitmap[bmap_index] =
8635 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8637 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8639 dev_err(&hdev->pdev->dev,
8640 "Send port rxvlan cfg command fail, ret =%d\n",
8646 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8647 u16 port_base_vlan_state,
8652 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8653 vport->txvlan_cfg.accept_tag1 = true;
8654 vport->txvlan_cfg.insert_tag1_en = false;
8655 vport->txvlan_cfg.default_tag1 = 0;
8657 vport->txvlan_cfg.accept_tag1 = false;
8658 vport->txvlan_cfg.insert_tag1_en = true;
8659 vport->txvlan_cfg.default_tag1 = vlan_tag;
8662 vport->txvlan_cfg.accept_untag1 = true;
8664 /* accept_tag2 and accept_untag2 are not supported on
8665 * pdev revision(0x20), new revision support them,
8666 * this two fields can not be configured by user.
8668 vport->txvlan_cfg.accept_tag2 = true;
8669 vport->txvlan_cfg.accept_untag2 = true;
8670 vport->txvlan_cfg.insert_tag2_en = false;
8671 vport->txvlan_cfg.default_tag2 = 0;
8673 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8674 vport->rxvlan_cfg.strip_tag1_en = false;
8675 vport->rxvlan_cfg.strip_tag2_en =
8676 vport->rxvlan_cfg.rx_vlan_offload_en;
8678 vport->rxvlan_cfg.strip_tag1_en =
8679 vport->rxvlan_cfg.rx_vlan_offload_en;
8680 vport->rxvlan_cfg.strip_tag2_en = true;
8682 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8683 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8685 ret = hclge_set_vlan_tx_offload_cfg(vport);
8689 return hclge_set_vlan_rx_offload_cfg(vport);
8692 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8694 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8695 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8696 struct hclge_desc desc;
8699 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8700 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8701 rx_req->ot_fst_vlan_type =
8702 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8703 rx_req->ot_sec_vlan_type =
8704 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8705 rx_req->in_fst_vlan_type =
8706 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8707 rx_req->in_sec_vlan_type =
8708 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8710 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8712 dev_err(&hdev->pdev->dev,
8713 "Send rxvlan protocol type command fail, ret =%d\n",
8718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8720 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8721 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8722 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8724 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8726 dev_err(&hdev->pdev->dev,
8727 "Send txvlan protocol type command fail, ret =%d\n",
8733 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8735 #define HCLGE_DEF_VLAN_TYPE 0x8100
8737 struct hnae3_handle *handle = &hdev->vport[0].nic;
8738 struct hclge_vport *vport;
8742 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8743 /* for revision 0x21, vf vlan filter is per function */
8744 for (i = 0; i < hdev->num_alloc_vport; i++) {
8745 vport = &hdev->vport[i];
8746 ret = hclge_set_vlan_filter_ctrl(hdev,
8747 HCLGE_FILTER_TYPE_VF,
8748 HCLGE_FILTER_FE_EGRESS,
8755 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8756 HCLGE_FILTER_FE_INGRESS, true,
8761 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8762 HCLGE_FILTER_FE_EGRESS_V1_B,
8768 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8770 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8771 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8772 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8773 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8774 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8775 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8777 ret = hclge_set_vlan_protocol_type(hdev);
8781 for (i = 0; i < hdev->num_alloc_vport; i++) {
8784 vport = &hdev->vport[i];
8785 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8787 ret = hclge_vlan_offload_cfg(vport,
8788 vport->port_base_vlan_cfg.state,
8794 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8797 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8800 struct hclge_vport_vlan_cfg *vlan;
8802 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8806 vlan->hd_tbl_status = writen_to_tbl;
8807 vlan->vlan_id = vlan_id;
8809 list_add_tail(&vlan->node, &vport->vlan_list);
8812 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8814 struct hclge_vport_vlan_cfg *vlan, *tmp;
8815 struct hclge_dev *hdev = vport->back;
8818 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8819 if (!vlan->hd_tbl_status) {
8820 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8822 vlan->vlan_id, false);
8824 dev_err(&hdev->pdev->dev,
8825 "restore vport vlan list failed, ret=%d\n",
8830 vlan->hd_tbl_status = true;
8836 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8839 struct hclge_vport_vlan_cfg *vlan, *tmp;
8840 struct hclge_dev *hdev = vport->back;
8842 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8843 if (vlan->vlan_id == vlan_id) {
8844 if (is_write_tbl && vlan->hd_tbl_status)
8845 hclge_set_vlan_filter_hw(hdev,
8851 list_del(&vlan->node);
8858 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8860 struct hclge_vport_vlan_cfg *vlan, *tmp;
8861 struct hclge_dev *hdev = vport->back;
8863 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8864 if (vlan->hd_tbl_status)
8865 hclge_set_vlan_filter_hw(hdev,
8871 vlan->hd_tbl_status = false;
8873 list_del(&vlan->node);
8877 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8880 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8882 struct hclge_vport_vlan_cfg *vlan, *tmp;
8883 struct hclge_vport *vport;
8886 for (i = 0; i < hdev->num_alloc_vport; i++) {
8887 vport = &hdev->vport[i];
8888 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8889 list_del(&vlan->node);
8895 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8897 struct hclge_vport_vlan_cfg *vlan, *tmp;
8898 struct hclge_dev *hdev = vport->back;
8904 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8905 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8906 state = vport->port_base_vlan_cfg.state;
8908 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8909 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8910 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8911 vport->vport_id, vlan_id,
8916 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8917 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8919 vlan->vlan_id, false);
8922 vlan->hd_tbl_status = true;
8926 /* For global reset and imp reset, hardware will clear the mac table,
8927 * so we change the mac address state from ACTIVE to TO_ADD, then they
8928 * can be restored in the service task after reset complete. Furtherly,
8929 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
8930 * be restored after reset, so just remove these mac nodes from mac_list.
8932 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8934 struct hclge_mac_node *mac_node, *tmp;
8936 list_for_each_entry_safe(mac_node, tmp, list, node) {
8937 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8938 mac_node->state = HCLGE_MAC_TO_ADD;
8939 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8940 list_del(&mac_node->node);
8946 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8948 spin_lock_bh(&vport->mac_list_lock);
8950 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8951 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8952 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8954 spin_unlock_bh(&vport->mac_list_lock);
8957 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8959 struct hclge_vport *vport = &hdev->vport[0];
8960 struct hnae3_handle *handle = &vport->nic;
8962 hclge_restore_mac_table_common(vport);
8963 hclge_restore_vport_vlan_table(vport);
8964 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8966 hclge_restore_fd_entries(handle);
8969 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8971 struct hclge_vport *vport = hclge_get_vport(handle);
8973 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8974 vport->rxvlan_cfg.strip_tag1_en = false;
8975 vport->rxvlan_cfg.strip_tag2_en = enable;
8977 vport->rxvlan_cfg.strip_tag1_en = enable;
8978 vport->rxvlan_cfg.strip_tag2_en = true;
8980 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8981 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8982 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8984 return hclge_set_vlan_rx_offload_cfg(vport);
8987 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8988 u16 port_base_vlan_state,
8989 struct hclge_vlan_info *new_info,
8990 struct hclge_vlan_info *old_info)
8992 struct hclge_dev *hdev = vport->back;
8995 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8996 hclge_rm_vport_all_vlan_table(vport, false);
8997 return hclge_set_vlan_filter_hw(hdev,
8998 htons(new_info->vlan_proto),
9004 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9005 vport->vport_id, old_info->vlan_tag,
9010 return hclge_add_vport_all_vlan_table(vport);
9013 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9014 struct hclge_vlan_info *vlan_info)
9016 struct hnae3_handle *nic = &vport->nic;
9017 struct hclge_vlan_info *old_vlan_info;
9018 struct hclge_dev *hdev = vport->back;
9021 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9023 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9027 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9028 /* add new VLAN tag */
9029 ret = hclge_set_vlan_filter_hw(hdev,
9030 htons(vlan_info->vlan_proto),
9032 vlan_info->vlan_tag,
9037 /* remove old VLAN tag */
9038 ret = hclge_set_vlan_filter_hw(hdev,
9039 htons(old_vlan_info->vlan_proto),
9041 old_vlan_info->vlan_tag,
9049 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9054 /* update state only when disable/enable port based VLAN */
9055 vport->port_base_vlan_cfg.state = state;
9056 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9057 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9059 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9062 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9063 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9064 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9069 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9070 enum hnae3_port_base_vlan_state state,
9073 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9075 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9077 return HNAE3_PORT_BASE_VLAN_ENABLE;
9080 return HNAE3_PORT_BASE_VLAN_DISABLE;
9081 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9082 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9084 return HNAE3_PORT_BASE_VLAN_MODIFY;
9088 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9089 u16 vlan, u8 qos, __be16 proto)
9091 struct hclge_vport *vport = hclge_get_vport(handle);
9092 struct hclge_dev *hdev = vport->back;
9093 struct hclge_vlan_info vlan_info;
9097 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9100 vport = hclge_get_vf_vport(hdev, vfid);
9104 /* qos is a 3 bits value, so can not be bigger than 7 */
9105 if (vlan > VLAN_N_VID - 1 || qos > 7)
9107 if (proto != htons(ETH_P_8021Q))
9108 return -EPROTONOSUPPORT;
9110 state = hclge_get_port_base_vlan_state(vport,
9111 vport->port_base_vlan_cfg.state,
9113 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9116 vlan_info.vlan_tag = vlan;
9117 vlan_info.qos = qos;
9118 vlan_info.vlan_proto = ntohs(proto);
9120 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9121 return hclge_update_port_base_vlan_cfg(vport, state,
9124 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9125 vport->vport_id, state,
9132 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9134 struct hclge_vlan_info *vlan_info;
9135 struct hclge_vport *vport;
9139 /* clear port base vlan for all vf */
9140 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9141 vport = &hdev->vport[vf];
9142 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9144 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9146 vlan_info->vlan_tag, true);
9148 dev_err(&hdev->pdev->dev,
9149 "failed to clear vf vlan for vf%d, ret = %d\n",
9150 vf - HCLGE_VF_VPORT_START_NUM, ret);
9154 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9155 u16 vlan_id, bool is_kill)
9157 struct hclge_vport *vport = hclge_get_vport(handle);
9158 struct hclge_dev *hdev = vport->back;
9159 bool writen_to_tbl = false;
9162 /* When device is resetting or reset failed, firmware is unable to
9163 * handle mailbox. Just record the vlan id, and remove it after
9166 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9167 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9168 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9172 /* when port base vlan enabled, we use port base vlan as the vlan
9173 * filter entry. In this case, we don't update vlan filter table
9174 * when user add new vlan or remove exist vlan, just update the vport
9175 * vlan list. The vlan id in vlan list will be writen in vlan filter
9176 * table until port base vlan disabled
9178 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9179 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9181 writen_to_tbl = true;
9186 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9188 hclge_add_vport_vlan_table(vport, vlan_id,
9190 } else if (is_kill) {
9191 /* when remove hw vlan filter failed, record the vlan id,
9192 * and try to remove it from hw later, to be consistence
9195 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9200 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9202 #define HCLGE_MAX_SYNC_COUNT 60
9204 int i, ret, sync_cnt = 0;
9207 /* start from vport 1 for PF is always alive */
9208 for (i = 0; i < hdev->num_alloc_vport; i++) {
9209 struct hclge_vport *vport = &hdev->vport[i];
9211 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9213 while (vlan_id != VLAN_N_VID) {
9214 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9215 vport->vport_id, vlan_id,
9217 if (ret && ret != -EINVAL)
9220 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9221 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9224 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9227 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9233 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9235 struct hclge_config_max_frm_size_cmd *req;
9236 struct hclge_desc desc;
9238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9240 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9241 req->max_frm_size = cpu_to_le16(new_mps);
9242 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9244 return hclge_cmd_send(&hdev->hw, &desc, 1);
9247 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9249 struct hclge_vport *vport = hclge_get_vport(handle);
9251 return hclge_set_vport_mtu(vport, new_mtu);
9254 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9256 struct hclge_dev *hdev = vport->back;
9257 int i, max_frm_size, ret;
9259 /* HW supprt 2 layer vlan */
9260 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9261 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9262 max_frm_size > HCLGE_MAC_MAX_FRAME)
9265 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9266 mutex_lock(&hdev->vport_lock);
9267 /* VF's mps must fit within hdev->mps */
9268 if (vport->vport_id && max_frm_size > hdev->mps) {
9269 mutex_unlock(&hdev->vport_lock);
9271 } else if (vport->vport_id) {
9272 vport->mps = max_frm_size;
9273 mutex_unlock(&hdev->vport_lock);
9277 /* PF's mps must be greater then VF's mps */
9278 for (i = 1; i < hdev->num_alloc_vport; i++)
9279 if (max_frm_size < hdev->vport[i].mps) {
9280 mutex_unlock(&hdev->vport_lock);
9284 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9286 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9288 dev_err(&hdev->pdev->dev,
9289 "Change mtu fail, ret =%d\n", ret);
9293 hdev->mps = max_frm_size;
9294 vport->mps = max_frm_size;
9296 ret = hclge_buffer_alloc(hdev);
9298 dev_err(&hdev->pdev->dev,
9299 "Allocate buffer fail, ret =%d\n", ret);
9302 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9303 mutex_unlock(&hdev->vport_lock);
9307 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9310 struct hclge_reset_tqp_queue_cmd *req;
9311 struct hclge_desc desc;
9314 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9316 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9317 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9319 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9321 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9323 dev_err(&hdev->pdev->dev,
9324 "Send tqp reset cmd error, status =%d\n", ret);
9331 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9333 struct hclge_reset_tqp_queue_cmd *req;
9334 struct hclge_desc desc;
9337 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9339 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9340 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9342 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9344 dev_err(&hdev->pdev->dev,
9345 "Get reset status error, status =%d\n", ret);
9349 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9352 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9354 struct hnae3_queue *queue;
9355 struct hclge_tqp *tqp;
9357 queue = handle->kinfo.tqp[queue_id];
9358 tqp = container_of(queue, struct hclge_tqp, q);
9363 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9365 struct hclge_vport *vport = hclge_get_vport(handle);
9366 struct hclge_dev *hdev = vport->back;
9367 int reset_try_times = 0;
9372 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9374 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9376 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9380 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9382 dev_err(&hdev->pdev->dev,
9383 "Send reset tqp cmd fail, ret = %d\n", ret);
9387 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9388 reset_status = hclge_get_reset_status(hdev, queue_gid);
9392 /* Wait for tqp hw reset */
9393 usleep_range(1000, 1200);
9396 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9397 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9401 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9403 dev_err(&hdev->pdev->dev,
9404 "Deassert the soft reset fail, ret = %d\n", ret);
9409 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9411 struct hclge_dev *hdev = vport->back;
9412 int reset_try_times = 0;
9417 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9419 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9421 dev_warn(&hdev->pdev->dev,
9422 "Send reset tqp cmd fail, ret = %d\n", ret);
9426 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9427 reset_status = hclge_get_reset_status(hdev, queue_gid);
9431 /* Wait for tqp hw reset */
9432 usleep_range(1000, 1200);
9435 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9436 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9440 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9442 dev_warn(&hdev->pdev->dev,
9443 "Deassert the soft reset fail, ret = %d\n", ret);
9446 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9448 struct hclge_vport *vport = hclge_get_vport(handle);
9449 struct hclge_dev *hdev = vport->back;
9451 return hdev->fw_version;
9454 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9456 struct phy_device *phydev = hdev->hw.mac.phydev;
9461 phy_set_asym_pause(phydev, rx_en, tx_en);
9464 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9468 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9471 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9473 dev_err(&hdev->pdev->dev,
9474 "configure pauseparam error, ret = %d.\n", ret);
9479 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9481 struct phy_device *phydev = hdev->hw.mac.phydev;
9482 u16 remote_advertising = 0;
9483 u16 local_advertising;
9484 u32 rx_pause, tx_pause;
9487 if (!phydev->link || !phydev->autoneg)
9490 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9493 remote_advertising = LPA_PAUSE_CAP;
9495 if (phydev->asym_pause)
9496 remote_advertising |= LPA_PAUSE_ASYM;
9498 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9499 remote_advertising);
9500 tx_pause = flowctl & FLOW_CTRL_TX;
9501 rx_pause = flowctl & FLOW_CTRL_RX;
9503 if (phydev->duplex == HCLGE_MAC_HALF) {
9508 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9511 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9512 u32 *rx_en, u32 *tx_en)
9514 struct hclge_vport *vport = hclge_get_vport(handle);
9515 struct hclge_dev *hdev = vport->back;
9516 struct phy_device *phydev = hdev->hw.mac.phydev;
9518 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9520 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9526 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9529 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9532 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9541 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9542 u32 rx_en, u32 tx_en)
9545 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9546 else if (rx_en && !tx_en)
9547 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9548 else if (!rx_en && tx_en)
9549 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9551 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9553 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9556 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9557 u32 rx_en, u32 tx_en)
9559 struct hclge_vport *vport = hclge_get_vport(handle);
9560 struct hclge_dev *hdev = vport->back;
9561 struct phy_device *phydev = hdev->hw.mac.phydev;
9565 fc_autoneg = hclge_get_autoneg(handle);
9566 if (auto_neg != fc_autoneg) {
9567 dev_info(&hdev->pdev->dev,
9568 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9573 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9574 dev_info(&hdev->pdev->dev,
9575 "Priority flow control enabled. Cannot set link flow control.\n");
9579 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9581 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9584 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9587 return phy_start_aneg(phydev);
9592 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9593 u8 *auto_neg, u32 *speed, u8 *duplex)
9595 struct hclge_vport *vport = hclge_get_vport(handle);
9596 struct hclge_dev *hdev = vport->back;
9599 *speed = hdev->hw.mac.speed;
9601 *duplex = hdev->hw.mac.duplex;
9603 *auto_neg = hdev->hw.mac.autoneg;
9606 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9609 struct hclge_vport *vport = hclge_get_vport(handle);
9610 struct hclge_dev *hdev = vport->back;
9612 /* When nic is down, the service task is not running, doesn't update
9613 * the port information per second. Query the port information before
9614 * return the media type, ensure getting the correct media information.
9616 hclge_update_port_info(hdev);
9619 *media_type = hdev->hw.mac.media_type;
9622 *module_type = hdev->hw.mac.module_type;
9625 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9626 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9628 struct hclge_vport *vport = hclge_get_vport(handle);
9629 struct hclge_dev *hdev = vport->back;
9630 struct phy_device *phydev = hdev->hw.mac.phydev;
9631 int mdix_ctrl, mdix, is_resolved;
9632 unsigned int retval;
9635 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9636 *tp_mdix = ETH_TP_MDI_INVALID;
9640 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9642 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9643 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9644 HCLGE_PHY_MDIX_CTRL_S);
9646 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9647 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9648 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9650 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9652 switch (mdix_ctrl) {
9654 *tp_mdix_ctrl = ETH_TP_MDI;
9657 *tp_mdix_ctrl = ETH_TP_MDI_X;
9660 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9663 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9668 *tp_mdix = ETH_TP_MDI_INVALID;
9670 *tp_mdix = ETH_TP_MDI_X;
9672 *tp_mdix = ETH_TP_MDI;
9675 static void hclge_info_show(struct hclge_dev *hdev)
9677 struct device *dev = &hdev->pdev->dev;
9679 dev_info(dev, "PF info begin:\n");
9681 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9682 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9683 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9684 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9685 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9686 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9687 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9688 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9689 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9690 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9691 dev_info(dev, "This is %s PF\n",
9692 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9693 dev_info(dev, "DCB %s\n",
9694 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9695 dev_info(dev, "MQPRIO %s\n",
9696 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9698 dev_info(dev, "PF info end.\n");
9701 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9702 struct hclge_vport *vport)
9704 struct hnae3_client *client = vport->nic.client;
9705 struct hclge_dev *hdev = ae_dev->priv;
9706 int rst_cnt = hdev->rst_stats.reset_cnt;
9709 ret = client->ops->init_instance(&vport->nic);
9713 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9714 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9715 rst_cnt != hdev->rst_stats.reset_cnt) {
9720 /* Enable nic hw error interrupts */
9721 ret = hclge_config_nic_hw_error(hdev, true);
9723 dev_err(&ae_dev->pdev->dev,
9724 "fail(%d) to enable hw error interrupts\n", ret);
9728 hnae3_set_client_init_flag(client, ae_dev, 1);
9730 if (netif_msg_drv(&hdev->vport->nic))
9731 hclge_info_show(hdev);
9736 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9737 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9738 msleep(HCLGE_WAIT_RESET_DONE);
9740 client->ops->uninit_instance(&vport->nic, 0);
9745 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9746 struct hclge_vport *vport)
9748 struct hclge_dev *hdev = ae_dev->priv;
9749 struct hnae3_client *client;
9753 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9757 client = hdev->roce_client;
9758 ret = hclge_init_roce_base_info(vport);
9762 rst_cnt = hdev->rst_stats.reset_cnt;
9763 ret = client->ops->init_instance(&vport->roce);
9767 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9768 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9769 rst_cnt != hdev->rst_stats.reset_cnt) {
9774 /* Enable roce ras interrupts */
9775 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9777 dev_err(&ae_dev->pdev->dev,
9778 "fail(%d) to enable roce ras interrupts\n", ret);
9782 hnae3_set_client_init_flag(client, ae_dev, 1);
9787 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9788 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9789 msleep(HCLGE_WAIT_RESET_DONE);
9791 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9796 static int hclge_init_client_instance(struct hnae3_client *client,
9797 struct hnae3_ae_dev *ae_dev)
9799 struct hclge_dev *hdev = ae_dev->priv;
9800 struct hclge_vport *vport;
9803 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9804 vport = &hdev->vport[i];
9806 switch (client->type) {
9807 case HNAE3_CLIENT_KNIC:
9808 hdev->nic_client = client;
9809 vport->nic.client = client;
9810 ret = hclge_init_nic_client_instance(ae_dev, vport);
9814 ret = hclge_init_roce_client_instance(ae_dev, vport);
9819 case HNAE3_CLIENT_ROCE:
9820 if (hnae3_dev_roce_supported(hdev)) {
9821 hdev->roce_client = client;
9822 vport->roce.client = client;
9825 ret = hclge_init_roce_client_instance(ae_dev, vport);
9838 hdev->nic_client = NULL;
9839 vport->nic.client = NULL;
9842 hdev->roce_client = NULL;
9843 vport->roce.client = NULL;
9847 static void hclge_uninit_client_instance(struct hnae3_client *client,
9848 struct hnae3_ae_dev *ae_dev)
9850 struct hclge_dev *hdev = ae_dev->priv;
9851 struct hclge_vport *vport;
9854 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9855 vport = &hdev->vport[i];
9856 if (hdev->roce_client) {
9857 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9858 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9859 msleep(HCLGE_WAIT_RESET_DONE);
9861 hdev->roce_client->ops->uninit_instance(&vport->roce,
9863 hdev->roce_client = NULL;
9864 vport->roce.client = NULL;
9866 if (client->type == HNAE3_CLIENT_ROCE)
9868 if (hdev->nic_client && client->ops->uninit_instance) {
9869 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9870 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9871 msleep(HCLGE_WAIT_RESET_DONE);
9873 client->ops->uninit_instance(&vport->nic, 0);
9874 hdev->nic_client = NULL;
9875 vport->nic.client = NULL;
9880 static int hclge_pci_init(struct hclge_dev *hdev)
9882 struct pci_dev *pdev = hdev->pdev;
9883 struct hclge_hw *hw;
9886 ret = pci_enable_device(pdev);
9888 dev_err(&pdev->dev, "failed to enable PCI device\n");
9892 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9894 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9897 "can't set consistent PCI DMA");
9898 goto err_disable_device;
9900 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9903 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9905 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9906 goto err_disable_device;
9909 pci_set_master(pdev);
9911 hw->io_base = pcim_iomap(pdev, 2, 0);
9913 dev_err(&pdev->dev, "Can't map configuration register space\n");
9915 goto err_clr_master;
9918 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9922 pci_clear_master(pdev);
9923 pci_release_regions(pdev);
9925 pci_disable_device(pdev);
9930 static void hclge_pci_uninit(struct hclge_dev *hdev)
9932 struct pci_dev *pdev = hdev->pdev;
9934 pcim_iounmap(pdev, hdev->hw.io_base);
9935 pci_free_irq_vectors(pdev);
9936 pci_clear_master(pdev);
9937 pci_release_mem_regions(pdev);
9938 pci_disable_device(pdev);
9941 static void hclge_state_init(struct hclge_dev *hdev)
9943 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9944 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9945 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9946 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9947 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9948 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9949 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9952 static void hclge_state_uninit(struct hclge_dev *hdev)
9954 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9955 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9957 if (hdev->reset_timer.function)
9958 del_timer_sync(&hdev->reset_timer);
9959 if (hdev->service_task.work.func)
9960 cancel_delayed_work_sync(&hdev->service_task);
9963 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9965 #define HCLGE_FLR_RETRY_WAIT_MS 500
9966 #define HCLGE_FLR_RETRY_CNT 5
9968 struct hclge_dev *hdev = ae_dev->priv;
9973 down(&hdev->reset_sem);
9974 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9975 hdev->reset_type = HNAE3_FLR_RESET;
9976 ret = hclge_reset_prepare(hdev);
9977 if (ret || hdev->reset_pending) {
9978 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9980 if (hdev->reset_pending ||
9981 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9982 dev_err(&hdev->pdev->dev,
9983 "reset_pending:0x%lx, retry_cnt:%d\n",
9984 hdev->reset_pending, retry_cnt);
9985 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9986 up(&hdev->reset_sem);
9987 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9992 /* disable misc vector before FLR done */
9993 hclge_enable_vector(&hdev->misc_vector, false);
9994 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9995 hdev->rst_stats.flr_rst_cnt++;
9998 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10000 struct hclge_dev *hdev = ae_dev->priv;
10003 hclge_enable_vector(&hdev->misc_vector, true);
10005 ret = hclge_reset_rebuild(hdev);
10007 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10009 hdev->reset_type = HNAE3_NONE_RESET;
10010 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10011 up(&hdev->reset_sem);
10014 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10018 for (i = 0; i < hdev->num_alloc_vport; i++) {
10019 struct hclge_vport *vport = &hdev->vport[i];
10022 /* Send cmd to clear VF's FUNC_RST_ING */
10023 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10025 dev_warn(&hdev->pdev->dev,
10026 "clear vf(%u) rst failed %d!\n",
10027 vport->vport_id, ret);
10031 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10033 struct pci_dev *pdev = ae_dev->pdev;
10034 struct hclge_dev *hdev;
10037 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10042 hdev->ae_dev = ae_dev;
10043 hdev->reset_type = HNAE3_NONE_RESET;
10044 hdev->reset_level = HNAE3_FUNC_RESET;
10045 ae_dev->priv = hdev;
10047 /* HW supprt 2 layer vlan */
10048 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10050 mutex_init(&hdev->vport_lock);
10051 spin_lock_init(&hdev->fd_rule_lock);
10052 sema_init(&hdev->reset_sem, 1);
10054 ret = hclge_pci_init(hdev);
10058 /* Firmware command queue initialize */
10059 ret = hclge_cmd_queue_init(hdev);
10061 goto err_pci_uninit;
10063 /* Firmware command initialize */
10064 ret = hclge_cmd_init(hdev);
10066 goto err_cmd_uninit;
10068 ret = hclge_get_cap(hdev);
10070 goto err_cmd_uninit;
10072 ret = hclge_query_dev_specs(hdev);
10074 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10076 goto err_cmd_uninit;
10079 ret = hclge_configure(hdev);
10081 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10082 goto err_cmd_uninit;
10085 ret = hclge_init_msi(hdev);
10087 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10088 goto err_cmd_uninit;
10091 ret = hclge_misc_irq_init(hdev);
10093 goto err_msi_uninit;
10095 ret = hclge_alloc_tqps(hdev);
10097 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10098 goto err_msi_irq_uninit;
10101 ret = hclge_alloc_vport(hdev);
10103 goto err_msi_irq_uninit;
10105 ret = hclge_map_tqp(hdev);
10107 goto err_msi_irq_uninit;
10109 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10110 ret = hclge_mac_mdio_config(hdev);
10112 goto err_msi_irq_uninit;
10115 ret = hclge_init_umv_space(hdev);
10117 goto err_mdiobus_unreg;
10119 ret = hclge_mac_init(hdev);
10121 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10122 goto err_mdiobus_unreg;
10125 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10127 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10128 goto err_mdiobus_unreg;
10131 ret = hclge_config_gro(hdev, true);
10133 goto err_mdiobus_unreg;
10135 ret = hclge_init_vlan_config(hdev);
10137 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10138 goto err_mdiobus_unreg;
10141 ret = hclge_tm_schd_init(hdev);
10143 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10144 goto err_mdiobus_unreg;
10147 hclge_rss_init_cfg(hdev);
10148 ret = hclge_rss_init_hw(hdev);
10150 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10151 goto err_mdiobus_unreg;
10154 ret = init_mgr_tbl(hdev);
10156 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10157 goto err_mdiobus_unreg;
10160 ret = hclge_init_fd_config(hdev);
10162 dev_err(&pdev->dev,
10163 "fd table init fail, ret=%d\n", ret);
10164 goto err_mdiobus_unreg;
10167 INIT_KFIFO(hdev->mac_tnl_log);
10169 hclge_dcb_ops_set(hdev);
10171 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10172 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10174 /* Setup affinity after service timer setup because add_timer_on
10175 * is called in affinity notify.
10177 hclge_misc_affinity_setup(hdev);
10179 hclge_clear_all_event_cause(hdev);
10180 hclge_clear_resetting_state(hdev);
10182 /* Log and clear the hw errors those already occurred */
10183 hclge_handle_all_hns_hw_errors(ae_dev);
10185 /* request delayed reset for the error recovery because an immediate
10186 * global reset on a PF affecting pending initialization of other PFs
10188 if (ae_dev->hw_err_reset_req) {
10189 enum hnae3_reset_type reset_level;
10191 reset_level = hclge_get_reset_level(ae_dev,
10192 &ae_dev->hw_err_reset_req);
10193 hclge_set_def_reset_request(ae_dev, reset_level);
10194 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10197 /* Enable MISC vector(vector0) */
10198 hclge_enable_vector(&hdev->misc_vector, true);
10200 hclge_state_init(hdev);
10201 hdev->last_reset_time = jiffies;
10203 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10204 HCLGE_DRIVER_NAME);
10206 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10211 if (hdev->hw.mac.phydev)
10212 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10213 err_msi_irq_uninit:
10214 hclge_misc_irq_uninit(hdev);
10216 pci_free_irq_vectors(pdev);
10218 hclge_cmd_uninit(hdev);
10220 pcim_iounmap(pdev, hdev->hw.io_base);
10221 pci_clear_master(pdev);
10222 pci_release_regions(pdev);
10223 pci_disable_device(pdev);
10225 mutex_destroy(&hdev->vport_lock);
10229 static void hclge_stats_clear(struct hclge_dev *hdev)
10231 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10234 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10236 return hclge_config_switch_param(hdev, vf, enable,
10237 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10240 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10242 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10243 HCLGE_FILTER_FE_NIC_INGRESS_B,
10247 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10251 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10253 dev_err(&hdev->pdev->dev,
10254 "Set vf %d mac spoof check %s failed, ret=%d\n",
10255 vf, enable ? "on" : "off", ret);
10259 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10261 dev_err(&hdev->pdev->dev,
10262 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10263 vf, enable ? "on" : "off", ret);
10268 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10271 struct hclge_vport *vport = hclge_get_vport(handle);
10272 struct hclge_dev *hdev = vport->back;
10273 u32 new_spoofchk = enable ? 1 : 0;
10276 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10277 return -EOPNOTSUPP;
10279 vport = hclge_get_vf_vport(hdev, vf);
10283 if (vport->vf_info.spoofchk == new_spoofchk)
10286 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10287 dev_warn(&hdev->pdev->dev,
10288 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10290 else if (enable && hclge_is_umv_space_full(vport, true))
10291 dev_warn(&hdev->pdev->dev,
10292 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10295 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10299 vport->vf_info.spoofchk = new_spoofchk;
10303 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10305 struct hclge_vport *vport = hdev->vport;
10309 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10312 /* resume the vf spoof check state after reset */
10313 for (i = 0; i < hdev->num_alloc_vport; i++) {
10314 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10315 vport->vf_info.spoofchk);
10325 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10327 struct hclge_vport *vport = hclge_get_vport(handle);
10328 struct hclge_dev *hdev = vport->back;
10329 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10330 u32 new_trusted = enable ? 1 : 0;
10334 vport = hclge_get_vf_vport(hdev, vf);
10338 if (vport->vf_info.trusted == new_trusted)
10341 /* Disable promisc mode for VF if it is not trusted any more. */
10342 if (!enable && vport->vf_info.promisc_enable) {
10343 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10344 ret = hclge_set_vport_promisc_mode(vport, false, false,
10348 vport->vf_info.promisc_enable = 0;
10349 hclge_inform_vf_promisc_info(vport);
10352 vport->vf_info.trusted = new_trusted;
10357 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10362 /* reset vf rate to default value */
10363 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10364 struct hclge_vport *vport = &hdev->vport[vf];
10366 vport->vf_info.max_tx_rate = 0;
10367 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10369 dev_err(&hdev->pdev->dev,
10370 "vf%d failed to reset to default, ret=%d\n",
10371 vf - HCLGE_VF_VPORT_START_NUM, ret);
10375 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10376 int min_tx_rate, int max_tx_rate)
10378 if (min_tx_rate != 0 ||
10379 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10380 dev_err(&hdev->pdev->dev,
10381 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10382 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10389 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10390 int min_tx_rate, int max_tx_rate, bool force)
10392 struct hclge_vport *vport = hclge_get_vport(handle);
10393 struct hclge_dev *hdev = vport->back;
10396 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10400 vport = hclge_get_vf_vport(hdev, vf);
10404 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10407 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10411 vport->vf_info.max_tx_rate = max_tx_rate;
10416 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10418 struct hnae3_handle *handle = &hdev->vport->nic;
10419 struct hclge_vport *vport;
10423 /* resume the vf max_tx_rate after reset */
10424 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10425 vport = hclge_get_vf_vport(hdev, vf);
10429 /* zero means max rate, after reset, firmware already set it to
10430 * max rate, so just continue.
10432 if (!vport->vf_info.max_tx_rate)
10435 ret = hclge_set_vf_rate(handle, vf, 0,
10436 vport->vf_info.max_tx_rate, true);
10438 dev_err(&hdev->pdev->dev,
10439 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10440 vf, vport->vf_info.max_tx_rate, ret);
10448 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10450 struct hclge_vport *vport = hdev->vport;
10453 for (i = 0; i < hdev->num_alloc_vport; i++) {
10454 hclge_vport_stop(vport);
10459 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10461 struct hclge_dev *hdev = ae_dev->priv;
10462 struct pci_dev *pdev = ae_dev->pdev;
10465 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10467 hclge_stats_clear(hdev);
10468 /* NOTE: pf reset needn't to clear or restore pf and vf table entry.
10469 * so here should not clean table in memory.
10471 if (hdev->reset_type == HNAE3_IMP_RESET ||
10472 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10473 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10474 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10475 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10476 hclge_reset_umv_space(hdev);
10479 ret = hclge_cmd_init(hdev);
10481 dev_err(&pdev->dev, "Cmd queue init failed\n");
10485 ret = hclge_map_tqp(hdev);
10487 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10491 ret = hclge_mac_init(hdev);
10493 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10497 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10499 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10503 ret = hclge_config_gro(hdev, true);
10507 ret = hclge_init_vlan_config(hdev);
10509 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10513 ret = hclge_tm_init_hw(hdev, true);
10515 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10519 ret = hclge_rss_init_hw(hdev);
10521 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10525 ret = init_mgr_tbl(hdev);
10527 dev_err(&pdev->dev,
10528 "failed to reinit manager table, ret = %d\n", ret);
10532 ret = hclge_init_fd_config(hdev);
10534 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10538 /* Log and clear the hw errors those already occurred */
10539 hclge_handle_all_hns_hw_errors(ae_dev);
10541 /* Re-enable the hw error interrupts because
10542 * the interrupts get disabled on global reset.
10544 ret = hclge_config_nic_hw_error(hdev, true);
10546 dev_err(&pdev->dev,
10547 "fail(%d) to re-enable NIC hw error interrupts\n",
10552 if (hdev->roce_client) {
10553 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10555 dev_err(&pdev->dev,
10556 "fail(%d) to re-enable roce ras interrupts\n",
10562 hclge_reset_vport_state(hdev);
10563 ret = hclge_reset_vport_spoofchk(hdev);
10567 ret = hclge_resume_vf_rate(hdev);
10571 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10572 HCLGE_DRIVER_NAME);
10577 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10579 struct hclge_dev *hdev = ae_dev->priv;
10580 struct hclge_mac *mac = &hdev->hw.mac;
10582 hclge_reset_vf_rate(hdev);
10583 hclge_clear_vf_vlan(hdev);
10584 hclge_misc_affinity_teardown(hdev);
10585 hclge_state_uninit(hdev);
10586 hclge_uninit_mac_table(hdev);
10589 mdiobus_unregister(mac->mdio_bus);
10591 /* Disable MISC vector(vector0) */
10592 hclge_enable_vector(&hdev->misc_vector, false);
10593 synchronize_irq(hdev->misc_vector.vector_irq);
10595 /* Disable all hw interrupts */
10596 hclge_config_mac_tnl_int(hdev, false);
10597 hclge_config_nic_hw_error(hdev, false);
10598 hclge_config_rocee_ras_interrupt(hdev, false);
10600 hclge_cmd_uninit(hdev);
10601 hclge_misc_irq_uninit(hdev);
10602 hclge_pci_uninit(hdev);
10603 mutex_destroy(&hdev->vport_lock);
10604 hclge_uninit_vport_vlan_table(hdev);
10605 ae_dev->priv = NULL;
10608 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10610 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10611 struct hclge_vport *vport = hclge_get_vport(handle);
10612 struct hclge_dev *hdev = vport->back;
10614 return min_t(u32, hdev->rss_size_max,
10615 vport->alloc_tqps / kinfo->num_tc);
10618 static void hclge_get_channels(struct hnae3_handle *handle,
10619 struct ethtool_channels *ch)
10621 ch->max_combined = hclge_get_max_channels(handle);
10622 ch->other_count = 1;
10624 ch->combined_count = handle->kinfo.rss_size;
10627 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10628 u16 *alloc_tqps, u16 *max_rss_size)
10630 struct hclge_vport *vport = hclge_get_vport(handle);
10631 struct hclge_dev *hdev = vport->back;
10633 *alloc_tqps = vport->alloc_tqps;
10634 *max_rss_size = hdev->rss_size_max;
10637 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10638 bool rxfh_configured)
10640 struct hclge_vport *vport = hclge_get_vport(handle);
10641 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10642 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10643 struct hclge_dev *hdev = vport->back;
10644 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10645 u16 cur_rss_size = kinfo->rss_size;
10646 u16 cur_tqps = kinfo->num_tqps;
10647 u16 tc_valid[HCLGE_MAX_TC_NUM];
10653 kinfo->req_rss_size = new_tqps_num;
10655 ret = hclge_tm_vport_map_update(hdev);
10657 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10661 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10662 roundup_size = ilog2(roundup_size);
10663 /* Set the RSS TC mode according to the new RSS size */
10664 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10667 if (!(hdev->hw_tc_map & BIT(i)))
10671 tc_size[i] = roundup_size;
10672 tc_offset[i] = kinfo->rss_size * i;
10674 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10678 /* RSS indirection table has been configuared by user */
10679 if (rxfh_configured)
10682 /* Reinitializes the rss indirect table according to the new RSS size */
10683 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10687 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10688 rss_indir[i] = i % kinfo->rss_size;
10690 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10692 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10699 dev_info(&hdev->pdev->dev,
10700 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10701 cur_rss_size, kinfo->rss_size,
10702 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10707 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10708 u32 *regs_num_64_bit)
10710 struct hclge_desc desc;
10714 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10715 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10717 dev_err(&hdev->pdev->dev,
10718 "Query register number cmd failed, ret = %d.\n", ret);
10722 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10723 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10725 total_num = *regs_num_32_bit + *regs_num_64_bit;
10732 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10735 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10736 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10738 struct hclge_desc *desc;
10739 u32 *reg_val = data;
10749 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10750 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10751 HCLGE_32_BIT_REG_RTN_DATANUM);
10752 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10756 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10757 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10759 dev_err(&hdev->pdev->dev,
10760 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10765 for (i = 0; i < cmd_num; i++) {
10767 desc_data = (__le32 *)(&desc[i].data[0]);
10768 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10770 desc_data = (__le32 *)(&desc[i]);
10771 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10773 for (k = 0; k < n; k++) {
10774 *reg_val++ = le32_to_cpu(*desc_data++);
10786 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10789 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10790 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10792 struct hclge_desc *desc;
10793 u64 *reg_val = data;
10803 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10804 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10805 HCLGE_64_BIT_REG_RTN_DATANUM);
10806 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10810 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10811 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10813 dev_err(&hdev->pdev->dev,
10814 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10819 for (i = 0; i < cmd_num; i++) {
10821 desc_data = (__le64 *)(&desc[i].data[0]);
10822 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10824 desc_data = (__le64 *)(&desc[i]);
10825 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10827 for (k = 0; k < n; k++) {
10828 *reg_val++ = le64_to_cpu(*desc_data++);
10840 #define MAX_SEPARATE_NUM 4
10841 #define SEPARATOR_VALUE 0xFDFCFBFA
10842 #define REG_NUM_PER_LINE 4
10843 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10844 #define REG_SEPARATOR_LINE 1
10845 #define REG_NUM_REMAIN_MASK 3
10846 #define BD_LIST_MAX_NUM 30
10848 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10852 /* initialize command BD except the last one */
10853 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10854 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10856 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10859 /* initialize the last command BD */
10860 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10862 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10865 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10869 u32 entries_per_desc, desc_index, index, offset, i;
10870 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10873 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10875 dev_err(&hdev->pdev->dev,
10876 "Get dfx bd num fail, status is %d.\n", ret);
10880 entries_per_desc = ARRAY_SIZE(desc[0].data);
10881 for (i = 0; i < type_num; i++) {
10882 offset = hclge_dfx_bd_offset_list[i];
10883 index = offset % entries_per_desc;
10884 desc_index = offset / entries_per_desc;
10885 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10891 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10892 struct hclge_desc *desc_src, int bd_num,
10893 enum hclge_opcode_type cmd)
10895 struct hclge_desc *desc = desc_src;
10898 hclge_cmd_setup_basic_desc(desc, cmd, true);
10899 for (i = 0; i < bd_num - 1; i++) {
10900 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10902 hclge_cmd_setup_basic_desc(desc, cmd, true);
10906 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10908 dev_err(&hdev->pdev->dev,
10909 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10915 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10918 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10919 struct hclge_desc *desc = desc_src;
10922 entries_per_desc = ARRAY_SIZE(desc->data);
10923 reg_num = entries_per_desc * bd_num;
10924 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10925 for (i = 0; i < reg_num; i++) {
10926 index = i % entries_per_desc;
10927 desc_index = i / entries_per_desc;
10928 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10930 for (i = 0; i < separator_num; i++)
10931 *reg++ = SEPARATOR_VALUE;
10933 return reg_num + separator_num;
10936 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10938 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10939 int data_len_per_desc, bd_num, i;
10940 int bd_num_list[BD_LIST_MAX_NUM];
10944 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10946 dev_err(&hdev->pdev->dev,
10947 "Get dfx reg bd num fail, status is %d.\n", ret);
10951 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10953 for (i = 0; i < dfx_reg_type_num; i++) {
10954 bd_num = bd_num_list[i];
10955 data_len = data_len_per_desc * bd_num;
10956 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10962 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10964 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10965 int bd_num, bd_num_max, buf_len, i;
10966 int bd_num_list[BD_LIST_MAX_NUM];
10967 struct hclge_desc *desc_src;
10971 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10973 dev_err(&hdev->pdev->dev,
10974 "Get dfx reg bd num fail, status is %d.\n", ret);
10978 bd_num_max = bd_num_list[0];
10979 for (i = 1; i < dfx_reg_type_num; i++)
10980 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10982 buf_len = sizeof(*desc_src) * bd_num_max;
10983 desc_src = kzalloc(buf_len, GFP_KERNEL);
10987 for (i = 0; i < dfx_reg_type_num; i++) {
10988 bd_num = bd_num_list[i];
10989 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10990 hclge_dfx_reg_opcode_list[i]);
10992 dev_err(&hdev->pdev->dev,
10993 "Get dfx reg fail, status is %d.\n", ret);
10997 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11004 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11005 struct hnae3_knic_private_info *kinfo)
11007 #define HCLGE_RING_REG_OFFSET 0x200
11008 #define HCLGE_RING_INT_REG_OFFSET 0x4
11010 int i, j, reg_num, separator_num;
11014 /* fetching per-PF registers valus from PF PCIe register space */
11015 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11016 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11017 for (i = 0; i < reg_num; i++)
11018 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11019 for (i = 0; i < separator_num; i++)
11020 *reg++ = SEPARATOR_VALUE;
11021 data_num_sum = reg_num + separator_num;
11023 reg_num = ARRAY_SIZE(common_reg_addr_list);
11024 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11025 for (i = 0; i < reg_num; i++)
11026 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11027 for (i = 0; i < separator_num; i++)
11028 *reg++ = SEPARATOR_VALUE;
11029 data_num_sum += reg_num + separator_num;
11031 reg_num = ARRAY_SIZE(ring_reg_addr_list);
11032 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11033 for (j = 0; j < kinfo->num_tqps; j++) {
11034 for (i = 0; i < reg_num; i++)
11035 *reg++ = hclge_read_dev(&hdev->hw,
11036 ring_reg_addr_list[i] +
11037 HCLGE_RING_REG_OFFSET * j);
11038 for (i = 0; i < separator_num; i++)
11039 *reg++ = SEPARATOR_VALUE;
11041 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11043 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11044 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11045 for (j = 0; j < hdev->num_msi_used - 1; j++) {
11046 for (i = 0; i < reg_num; i++)
11047 *reg++ = hclge_read_dev(&hdev->hw,
11048 tqp_intr_reg_addr_list[i] +
11049 HCLGE_RING_INT_REG_OFFSET * j);
11050 for (i = 0; i < separator_num; i++)
11051 *reg++ = SEPARATOR_VALUE;
11053 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11055 return data_num_sum;
11058 static int hclge_get_regs_len(struct hnae3_handle *handle)
11060 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11061 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11062 struct hclge_vport *vport = hclge_get_vport(handle);
11063 struct hclge_dev *hdev = vport->back;
11064 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11065 int regs_lines_32_bit, regs_lines_64_bit;
11068 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
11070 dev_err(&hdev->pdev->dev,
11071 "Get register number failed, ret = %d.\n", ret);
11075 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11077 dev_err(&hdev->pdev->dev,
11078 "Get dfx reg len failed, ret = %d.\n", ret);
11082 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11083 REG_SEPARATOR_LINE;
11084 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11085 REG_SEPARATOR_LINE;
11086 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11087 REG_SEPARATOR_LINE;
11088 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11089 REG_SEPARATOR_LINE;
11090 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11091 REG_SEPARATOR_LINE;
11092 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11093 REG_SEPARATOR_LINE;
11095 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11096 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11097 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11100 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11103 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11104 struct hclge_vport *vport = hclge_get_vport(handle);
11105 struct hclge_dev *hdev = vport->back;
11106 u32 regs_num_32_bit, regs_num_64_bit;
11107 int i, reg_num, separator_num, ret;
11110 *version = hdev->fw_version;
11112 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
11114 dev_err(&hdev->pdev->dev,
11115 "Get register number failed, ret = %d.\n", ret);
11119 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11121 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11123 dev_err(&hdev->pdev->dev,
11124 "Get 32 bit register failed, ret = %d.\n", ret);
11127 reg_num = regs_num_32_bit;
11129 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11130 for (i = 0; i < separator_num; i++)
11131 *reg++ = SEPARATOR_VALUE;
11133 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11135 dev_err(&hdev->pdev->dev,
11136 "Get 64 bit register failed, ret = %d.\n", ret);
11139 reg_num = regs_num_64_bit * 2;
11141 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11142 for (i = 0; i < separator_num; i++)
11143 *reg++ = SEPARATOR_VALUE;
11145 ret = hclge_get_dfx_reg(hdev, reg);
11147 dev_err(&hdev->pdev->dev,
11148 "Get dfx register failed, ret = %d.\n", ret);
11151 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11153 struct hclge_set_led_state_cmd *req;
11154 struct hclge_desc desc;
11157 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11159 req = (struct hclge_set_led_state_cmd *)desc.data;
11160 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11161 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11163 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11165 dev_err(&hdev->pdev->dev,
11166 "Send set led state cmd error, ret =%d\n", ret);
11171 enum hclge_led_status {
11174 HCLGE_LED_NO_CHANGE = 0xFF,
11177 static int hclge_set_led_id(struct hnae3_handle *handle,
11178 enum ethtool_phys_id_state status)
11180 struct hclge_vport *vport = hclge_get_vport(handle);
11181 struct hclge_dev *hdev = vport->back;
11184 case ETHTOOL_ID_ACTIVE:
11185 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11186 case ETHTOOL_ID_INACTIVE:
11187 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11193 static void hclge_get_link_mode(struct hnae3_handle *handle,
11194 unsigned long *supported,
11195 unsigned long *advertising)
11197 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11198 struct hclge_vport *vport = hclge_get_vport(handle);
11199 struct hclge_dev *hdev = vport->back;
11200 unsigned int idx = 0;
11202 for (; idx < size; idx++) {
11203 supported[idx] = hdev->hw.mac.supported[idx];
11204 advertising[idx] = hdev->hw.mac.advertising[idx];
11208 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11210 struct hclge_vport *vport = hclge_get_vport(handle);
11211 struct hclge_dev *hdev = vport->back;
11213 return hclge_config_gro(hdev, enable);
11216 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11218 struct hclge_vport *vport = &hdev->vport[0];
11219 struct hnae3_handle *handle = &vport->nic;
11223 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11224 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11225 vport->last_promisc_flags = vport->overflow_promisc_flags;
11228 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11229 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11230 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11231 tmp_flags & HNAE3_MPE);
11233 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11234 hclge_enable_vlan_filter(handle,
11235 tmp_flags & HNAE3_VLAN_FLTR);
11240 static bool hclge_module_existed(struct hclge_dev *hdev)
11242 struct hclge_desc desc;
11246 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11247 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11249 dev_err(&hdev->pdev->dev,
11250 "failed to get SFP exist state, ret = %d\n", ret);
11254 existed = le32_to_cpu(desc.data[0]);
11256 return existed != 0;
11259 /* need 6 bds(total 140 bytes) in one reading
11260 * return the number of bytes actually read, 0 means read failed.
11262 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11265 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11266 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11272 /* setup all 6 bds to read module eeprom info. */
11273 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11274 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11277 /* bd0~bd4 need next flag */
11278 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11279 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11282 /* setup bd0, this bd contains offset and read length. */
11283 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11284 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11285 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11286 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11288 ret = hclge_cmd_send(&hdev->hw, desc, i);
11290 dev_err(&hdev->pdev->dev,
11291 "failed to get SFP eeprom info, ret = %d\n", ret);
11295 /* copy sfp info from bd0 to out buffer. */
11296 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11297 memcpy(data, sfp_info_bd0->data, copy_len);
11298 read_len = copy_len;
11300 /* copy sfp info from bd1~bd5 to out buffer if needed. */
11301 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11302 if (read_len >= len)
11305 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11306 memcpy(data + read_len, desc[i].data, copy_len);
11307 read_len += copy_len;
11313 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11316 struct hclge_vport *vport = hclge_get_vport(handle);
11317 struct hclge_dev *hdev = vport->back;
11321 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11322 return -EOPNOTSUPP;
11324 if (!hclge_module_existed(hdev))
11327 while (read_len < len) {
11328 data_len = hclge_get_sfp_eeprom_info(hdev,
11335 read_len += data_len;
11341 static const struct hnae3_ae_ops hclge_ops = {
11342 .init_ae_dev = hclge_init_ae_dev,
11343 .uninit_ae_dev = hclge_uninit_ae_dev,
11344 .flr_prepare = hclge_flr_prepare,
11345 .flr_done = hclge_flr_done,
11346 .init_client_instance = hclge_init_client_instance,
11347 .uninit_client_instance = hclge_uninit_client_instance,
11348 .map_ring_to_vector = hclge_map_ring_to_vector,
11349 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11350 .get_vector = hclge_get_vector,
11351 .put_vector = hclge_put_vector,
11352 .set_promisc_mode = hclge_set_promisc_mode,
11353 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11354 .set_loopback = hclge_set_loopback,
11355 .start = hclge_ae_start,
11356 .stop = hclge_ae_stop,
11357 .client_start = hclge_client_start,
11358 .client_stop = hclge_client_stop,
11359 .get_status = hclge_get_status,
11360 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11361 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11362 .get_media_type = hclge_get_media_type,
11363 .check_port_speed = hclge_check_port_speed,
11364 .get_fec = hclge_get_fec,
11365 .set_fec = hclge_set_fec,
11366 .get_rss_key_size = hclge_get_rss_key_size,
11367 .get_rss_indir_size = hclge_get_rss_indir_size,
11368 .get_rss = hclge_get_rss,
11369 .set_rss = hclge_set_rss,
11370 .set_rss_tuple = hclge_set_rss_tuple,
11371 .get_rss_tuple = hclge_get_rss_tuple,
11372 .get_tc_size = hclge_get_tc_size,
11373 .get_mac_addr = hclge_get_mac_addr,
11374 .set_mac_addr = hclge_set_mac_addr,
11375 .do_ioctl = hclge_do_ioctl,
11376 .add_uc_addr = hclge_add_uc_addr,
11377 .rm_uc_addr = hclge_rm_uc_addr,
11378 .add_mc_addr = hclge_add_mc_addr,
11379 .rm_mc_addr = hclge_rm_mc_addr,
11380 .set_autoneg = hclge_set_autoneg,
11381 .get_autoneg = hclge_get_autoneg,
11382 .restart_autoneg = hclge_restart_autoneg,
11383 .halt_autoneg = hclge_halt_autoneg,
11384 .get_pauseparam = hclge_get_pauseparam,
11385 .set_pauseparam = hclge_set_pauseparam,
11386 .set_mtu = hclge_set_mtu,
11387 .reset_queue = hclge_reset_tqp,
11388 .get_stats = hclge_get_stats,
11389 .get_mac_stats = hclge_get_mac_stat,
11390 .update_stats = hclge_update_stats,
11391 .get_strings = hclge_get_strings,
11392 .get_sset_count = hclge_get_sset_count,
11393 .get_fw_version = hclge_get_fw_version,
11394 .get_mdix_mode = hclge_get_mdix_mode,
11395 .enable_vlan_filter = hclge_enable_vlan_filter,
11396 .set_vlan_filter = hclge_set_vlan_filter,
11397 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11398 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11399 .reset_event = hclge_reset_event,
11400 .get_reset_level = hclge_get_reset_level,
11401 .set_default_reset_request = hclge_set_def_reset_request,
11402 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11403 .set_channels = hclge_set_channels,
11404 .get_channels = hclge_get_channels,
11405 .get_regs_len = hclge_get_regs_len,
11406 .get_regs = hclge_get_regs,
11407 .set_led_id = hclge_set_led_id,
11408 .get_link_mode = hclge_get_link_mode,
11409 .add_fd_entry = hclge_add_fd_entry,
11410 .del_fd_entry = hclge_del_fd_entry,
11411 .del_all_fd_entries = hclge_del_all_fd_entries,
11412 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11413 .get_fd_rule_info = hclge_get_fd_rule_info,
11414 .get_fd_all_rules = hclge_get_all_rules,
11415 .enable_fd = hclge_enable_fd,
11416 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11417 .dbg_run_cmd = hclge_dbg_run_cmd,
11418 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11419 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11420 .ae_dev_resetting = hclge_ae_dev_resetting,
11421 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11422 .set_gro_en = hclge_gro_en,
11423 .get_global_queue_id = hclge_covert_handle_qid_global,
11424 .set_timer_task = hclge_set_timer_task,
11425 .mac_connect_phy = hclge_mac_connect_phy,
11426 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11427 .get_vf_config = hclge_get_vf_config,
11428 .set_vf_link_state = hclge_set_vf_link_state,
11429 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11430 .set_vf_trust = hclge_set_vf_trust,
11431 .set_vf_rate = hclge_set_vf_rate,
11432 .set_vf_mac = hclge_set_vf_mac,
11433 .get_module_eeprom = hclge_get_module_eeprom,
11434 .get_cmdq_stat = hclge_get_cmdq_stat,
11437 static struct hnae3_ae_algo ae_algo = {
11439 .pdev_id_table = ae_algo_pci_tbl,
11442 static int hclge_init(void)
11444 pr_info("%s is initializing\n", HCLGE_NAME);
11446 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11448 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11452 hnae3_register_ae_algo(&ae_algo);
11457 static void hclge_exit(void)
11459 hnae3_unregister_ae_algo(&ae_algo);
11460 destroy_workqueue(hclge_wq);
11462 module_init(hclge_init);
11463 module_exit(hclge_exit);
11465 MODULE_LICENSE("GPL");
11466 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11467 MODULE_DESCRIPTION("HCLGE Driver");
11468 MODULE_VERSION(HCLGE_MOD_VERSION);